kweaver-dolphin 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DolphinLanguageSDK/__init__.py +58 -0
- dolphin/__init__.py +62 -0
- dolphin/cli/__init__.py +20 -0
- dolphin/cli/args/__init__.py +9 -0
- dolphin/cli/args/parser.py +567 -0
- dolphin/cli/builtin_agents/__init__.py +22 -0
- dolphin/cli/commands/__init__.py +4 -0
- dolphin/cli/interrupt/__init__.py +8 -0
- dolphin/cli/interrupt/handler.py +205 -0
- dolphin/cli/interrupt/keyboard.py +82 -0
- dolphin/cli/main.py +49 -0
- dolphin/cli/multimodal/__init__.py +34 -0
- dolphin/cli/multimodal/clipboard.py +327 -0
- dolphin/cli/multimodal/handler.py +249 -0
- dolphin/cli/multimodal/image_processor.py +214 -0
- dolphin/cli/multimodal/input_parser.py +149 -0
- dolphin/cli/runner/__init__.py +8 -0
- dolphin/cli/runner/runner.py +989 -0
- dolphin/cli/ui/__init__.py +10 -0
- dolphin/cli/ui/console.py +2795 -0
- dolphin/cli/ui/input.py +340 -0
- dolphin/cli/ui/layout.py +425 -0
- dolphin/cli/ui/stream_renderer.py +302 -0
- dolphin/cli/utils/__init__.py +8 -0
- dolphin/cli/utils/helpers.py +135 -0
- dolphin/cli/utils/version.py +49 -0
- dolphin/core/__init__.py +107 -0
- dolphin/core/agent/__init__.py +10 -0
- dolphin/core/agent/agent_state.py +69 -0
- dolphin/core/agent/base_agent.py +970 -0
- dolphin/core/code_block/__init__.py +0 -0
- dolphin/core/code_block/agent_init_block.py +0 -0
- dolphin/core/code_block/assign_block.py +98 -0
- dolphin/core/code_block/basic_code_block.py +1865 -0
- dolphin/core/code_block/explore_block.py +1327 -0
- dolphin/core/code_block/explore_block_v2.py +712 -0
- dolphin/core/code_block/explore_strategy.py +672 -0
- dolphin/core/code_block/judge_block.py +220 -0
- dolphin/core/code_block/prompt_block.py +32 -0
- dolphin/core/code_block/skill_call_deduplicator.py +291 -0
- dolphin/core/code_block/tool_block.py +129 -0
- dolphin/core/common/__init__.py +17 -0
- dolphin/core/common/constants.py +176 -0
- dolphin/core/common/enums.py +1173 -0
- dolphin/core/common/exceptions.py +133 -0
- dolphin/core/common/multimodal.py +539 -0
- dolphin/core/common/object_type.py +165 -0
- dolphin/core/common/output_format.py +432 -0
- dolphin/core/common/types.py +36 -0
- dolphin/core/config/__init__.py +16 -0
- dolphin/core/config/global_config.py +1289 -0
- dolphin/core/config/ontology_config.py +133 -0
- dolphin/core/context/__init__.py +12 -0
- dolphin/core/context/context.py +1580 -0
- dolphin/core/context/context_manager.py +161 -0
- dolphin/core/context/var_output.py +82 -0
- dolphin/core/context/variable_pool.py +356 -0
- dolphin/core/context_engineer/__init__.py +41 -0
- dolphin/core/context_engineer/config/__init__.py +5 -0
- dolphin/core/context_engineer/config/settings.py +402 -0
- dolphin/core/context_engineer/core/__init__.py +7 -0
- dolphin/core/context_engineer/core/budget_manager.py +327 -0
- dolphin/core/context_engineer/core/context_assembler.py +583 -0
- dolphin/core/context_engineer/core/context_manager.py +637 -0
- dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
- dolphin/core/context_engineer/example/incremental_example.py +267 -0
- dolphin/core/context_engineer/example/traditional_example.py +334 -0
- dolphin/core/context_engineer/services/__init__.py +5 -0
- dolphin/core/context_engineer/services/compressor.py +399 -0
- dolphin/core/context_engineer/utils/__init__.py +6 -0
- dolphin/core/context_engineer/utils/context_utils.py +441 -0
- dolphin/core/context_engineer/utils/message_formatter.py +270 -0
- dolphin/core/context_engineer/utils/token_utils.py +139 -0
- dolphin/core/coroutine/__init__.py +15 -0
- dolphin/core/coroutine/context_snapshot.py +154 -0
- dolphin/core/coroutine/context_snapshot_profile.py +922 -0
- dolphin/core/coroutine/context_snapshot_store.py +268 -0
- dolphin/core/coroutine/execution_frame.py +145 -0
- dolphin/core/coroutine/execution_state_registry.py +161 -0
- dolphin/core/coroutine/resume_handle.py +101 -0
- dolphin/core/coroutine/step_result.py +101 -0
- dolphin/core/executor/__init__.py +18 -0
- dolphin/core/executor/debug_controller.py +630 -0
- dolphin/core/executor/dolphin_executor.py +1063 -0
- dolphin/core/executor/executor.py +624 -0
- dolphin/core/flags/__init__.py +27 -0
- dolphin/core/flags/definitions.py +49 -0
- dolphin/core/flags/manager.py +113 -0
- dolphin/core/hook/__init__.py +95 -0
- dolphin/core/hook/expression_evaluator.py +499 -0
- dolphin/core/hook/hook_dispatcher.py +380 -0
- dolphin/core/hook/hook_types.py +248 -0
- dolphin/core/hook/isolated_variable_pool.py +284 -0
- dolphin/core/interfaces.py +53 -0
- dolphin/core/llm/__init__.py +0 -0
- dolphin/core/llm/llm.py +495 -0
- dolphin/core/llm/llm_call.py +100 -0
- dolphin/core/llm/llm_client.py +1285 -0
- dolphin/core/llm/message_sanitizer.py +120 -0
- dolphin/core/logging/__init__.py +20 -0
- dolphin/core/logging/logger.py +526 -0
- dolphin/core/message/__init__.py +8 -0
- dolphin/core/message/compressor.py +749 -0
- dolphin/core/parser/__init__.py +8 -0
- dolphin/core/parser/parser.py +405 -0
- dolphin/core/runtime/__init__.py +10 -0
- dolphin/core/runtime/runtime_graph.py +926 -0
- dolphin/core/runtime/runtime_instance.py +446 -0
- dolphin/core/skill/__init__.py +14 -0
- dolphin/core/skill/context_retention.py +157 -0
- dolphin/core/skill/skill_function.py +686 -0
- dolphin/core/skill/skill_matcher.py +282 -0
- dolphin/core/skill/skillkit.py +700 -0
- dolphin/core/skill/skillset.py +72 -0
- dolphin/core/trajectory/__init__.py +10 -0
- dolphin/core/trajectory/recorder.py +189 -0
- dolphin/core/trajectory/trajectory.py +522 -0
- dolphin/core/utils/__init__.py +9 -0
- dolphin/core/utils/cache_kv.py +212 -0
- dolphin/core/utils/tools.py +340 -0
- dolphin/lib/__init__.py +93 -0
- dolphin/lib/debug/__init__.py +8 -0
- dolphin/lib/debug/visualizer.py +409 -0
- dolphin/lib/memory/__init__.py +28 -0
- dolphin/lib/memory/async_processor.py +220 -0
- dolphin/lib/memory/llm_calls.py +195 -0
- dolphin/lib/memory/manager.py +78 -0
- dolphin/lib/memory/sandbox.py +46 -0
- dolphin/lib/memory/storage.py +245 -0
- dolphin/lib/memory/utils.py +51 -0
- dolphin/lib/ontology/__init__.py +12 -0
- dolphin/lib/ontology/basic/__init__.py +0 -0
- dolphin/lib/ontology/basic/base.py +102 -0
- dolphin/lib/ontology/basic/concept.py +130 -0
- dolphin/lib/ontology/basic/object.py +11 -0
- dolphin/lib/ontology/basic/relation.py +63 -0
- dolphin/lib/ontology/datasource/__init__.py +27 -0
- dolphin/lib/ontology/datasource/datasource.py +66 -0
- dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
- dolphin/lib/ontology/datasource/sql.py +845 -0
- dolphin/lib/ontology/mapping.py +177 -0
- dolphin/lib/ontology/ontology.py +733 -0
- dolphin/lib/ontology/ontology_context.py +16 -0
- dolphin/lib/ontology/ontology_manager.py +107 -0
- dolphin/lib/skill_results/__init__.py +31 -0
- dolphin/lib/skill_results/cache_backend.py +559 -0
- dolphin/lib/skill_results/result_processor.py +181 -0
- dolphin/lib/skill_results/result_reference.py +179 -0
- dolphin/lib/skill_results/skillkit_hook.py +324 -0
- dolphin/lib/skill_results/strategies.py +328 -0
- dolphin/lib/skill_results/strategy_registry.py +150 -0
- dolphin/lib/skillkits/__init__.py +44 -0
- dolphin/lib/skillkits/agent_skillkit.py +155 -0
- dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
- dolphin/lib/skillkits/env_skillkit.py +250 -0
- dolphin/lib/skillkits/mcp_adapter.py +616 -0
- dolphin/lib/skillkits/mcp_skillkit.py +771 -0
- dolphin/lib/skillkits/memory_skillkit.py +650 -0
- dolphin/lib/skillkits/noop_skillkit.py +31 -0
- dolphin/lib/skillkits/ontology_skillkit.py +89 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
- dolphin/lib/skillkits/resource/__init__.py +52 -0
- dolphin/lib/skillkits/resource/models/__init__.py +6 -0
- dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
- dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
- dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
- dolphin/lib/skillkits/resource/skill_cache.py +215 -0
- dolphin/lib/skillkits/resource/skill_loader.py +395 -0
- dolphin/lib/skillkits/resource/skill_validator.py +406 -0
- dolphin/lib/skillkits/resource_skillkit.py +11 -0
- dolphin/lib/skillkits/search_skillkit.py +163 -0
- dolphin/lib/skillkits/sql_skillkit.py +274 -0
- dolphin/lib/skillkits/system_skillkit.py +509 -0
- dolphin/lib/skillkits/vm_skillkit.py +65 -0
- dolphin/lib/utils/__init__.py +9 -0
- dolphin/lib/utils/data_process.py +207 -0
- dolphin/lib/utils/handle_progress.py +178 -0
- dolphin/lib/utils/security.py +139 -0
- dolphin/lib/utils/text_retrieval.py +462 -0
- dolphin/lib/vm/__init__.py +11 -0
- dolphin/lib/vm/env_executor.py +895 -0
- dolphin/lib/vm/python_session_manager.py +453 -0
- dolphin/lib/vm/vm.py +610 -0
- dolphin/sdk/__init__.py +60 -0
- dolphin/sdk/agent/__init__.py +12 -0
- dolphin/sdk/agent/agent_factory.py +236 -0
- dolphin/sdk/agent/dolphin_agent.py +1106 -0
- dolphin/sdk/api/__init__.py +4 -0
- dolphin/sdk/runtime/__init__.py +8 -0
- dolphin/sdk/runtime/env.py +363 -0
- dolphin/sdk/skill/__init__.py +10 -0
- dolphin/sdk/skill/global_skills.py +706 -0
- dolphin/sdk/skill/traditional_toolkit.py +260 -0
- kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
- kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
- kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
- kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,771 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import threading
|
|
3
|
+
import concurrent.futures
|
|
4
|
+
import os
|
|
5
|
+
import atexit
|
|
6
|
+
from typing import List, Dict, Any
|
|
7
|
+
from dolphin.core.logging.logger import console, get_logger
|
|
8
|
+
from dolphin.core.skill.skillkit import Skillkit
|
|
9
|
+
from dolphin.core.skill.skill_function import SkillFunction
|
|
10
|
+
from dolphin.lib.skillkits.mcp_adapter import (
|
|
11
|
+
MCPAdapter,
|
|
12
|
+
MCPServerConfig,
|
|
13
|
+
_connection_pool,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# Thread-local storage so every worker thread reuses a single event loop
# (see get_or_create_event_loop) instead of creating one per call.
_thread_local = threading.local()

# Module-level logger for this skillkit.
logger = get_logger("skill.mcp_skillkit")
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def get_or_create_event_loop():
    """Return the current thread's cached event loop, creating one if needed.

    Each OS thread gets its own loop, stored on ``_thread_local``, so that
    synchronous wrappers running on pool threads do not pay the cost of
    creating and destroying a loop per call.

    Returns:
        asyncio.AbstractEventLoop: an open event loop owned by this thread.
    """
    try:
        # Reuse the cached loop if it exists and is still open.
        if hasattr(_thread_local, "loop") and not _thread_local.loop.is_closed():
            return _thread_local.loop

        # Create and cache a new loop for this thread.
        _thread_local.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(_thread_local.loop)

        # Log (rather than silently drop) exceptions raised by callbacks
        # scheduled on this loop.
        def exception_handler(loop, context):
            exception = context.get("exception")
            if exception:
                logger.error(f"Event loop exception: {exception}")
            else:
                logger.error(f"Event loop error: {context}")

        _thread_local.loop.set_exception_handler(exception_handler)

        return _thread_local.loop

    except Exception as e:
        # Fallback: setup of the cached loop failed; try to salvage an
        # existing loop before creating a bare one.
        logger.error(f"Error creating event loop: {e}")
        try:
            loop = asyncio.get_event_loop()
            if loop.is_closed():
                raise RuntimeError("Current loop is closed")
            return loop
        except Exception:
            # BUG FIX: the original used a bare ``except:`` (which also
            # swallows KeyboardInterrupt/SystemExit) and returned an
            # uncached loop, leaking a fresh loop on every failing call.
            # Catch Exception only, and cache the last-resort loop.
            loop = asyncio.new_event_loop()
            _thread_local.loop = loop
            asyncio.set_event_loop(loop)
            return loop
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
# Global thread pool manager, avoiding frequent creation and destruction of ThreadPoolExecutor
class GlobalThreadPoolManager:
    """Process-wide, lazily-created thread pool for running async MCP work.

    A single ThreadPoolExecutor is created on first use and transparently
    recreated if it was shut down externally. ``shutdown()`` is safe to call
    from ``atexit`` handlers: it never blocks indefinitely on the internal
    lock.
    """

    def __init__(self):
        self._executor = None           # created lazily by get_executor()
        self._lock = threading.RLock()  # RLock: same thread may re-enter
        self._shutdown = False          # once True, get_executor() refuses

    def get_executor(self) -> concurrent.futures.ThreadPoolExecutor:
        """Return the shared executor, creating/recreating it if needed.

        Raises:
            RuntimeError: if :meth:`shutdown` has already been called.
        """
        with self._lock:
            # BUG FIX: the shutdown flag is now checked *inside* the lock so
            # a concurrent shutdown() cannot slip in between the check and
            # executor creation (the original checked it before locking).
            if self._shutdown:
                raise RuntimeError("Thread pool has been shut down")

            # NOTE: probing the executor's private ``_shutdown`` attribute is
            # how we detect an externally shut-down pool; a missing attribute
            # is treated as "shut down" and the pool is rebuilt.
            if self._executor is None or getattr(self._executor, "_shutdown", True):
                # Size the pool like the stdlib default: cpu_count + 4, capped at 32.
                max_workers = min(32, (os.cpu_count() or 1) + 4)

                # Properly release the previous executor, if any, before
                # replacing it.
                if self._executor is not None:
                    try:
                        self._executor.shutdown(wait=False)
                    except Exception:
                        pass

                self._executor = concurrent.futures.ThreadPoolExecutor(
                    max_workers=max_workers, thread_name_prefix="mcp_async_"
                )
            return self._executor

    def shutdown(self):
        """Shut the pool down without blocking forever (atexit-safe).

        The shutdown flag is set first so no new executors are handed out;
        then the lock is taken with a short timeout and the executor is
        released. Failing to acquire the lock only skips the cleanup.
        """
        # First set the shutdown flag to prevent new requests.
        self._shutdown = True

        # Bounded lock acquisition avoids deadlocks during interpreter
        # shutdown or re-entrant calls.
        lock_acquired = False
        try:
            lock_acquired = self._lock.acquire(timeout=1.0)
            if lock_acquired:
                if self._executor and not getattr(self._executor, "_shutdown", True):
                    try:
                        self._executor.shutdown(wait=False)
                    except Exception as e:
                        logger.warning(f"Error shutting down executor: {e}")
                    finally:
                        self._executor = None
            else:
                # If we can't get the lock, the flag alone blocks new use.
                logger.warning("Could not acquire lock for shutdown, forcing cleanup")
        except Exception as e:
            logger.error(f"Error during shutdown: {e}")
        finally:
            if lock_acquired:
                try:
                    self._lock.release()
                except Exception:
                    pass
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
# Single process-wide thread pool instance shared by all MCPSkillkit
# objects and their generated skill functions.
_global_thread_pool = GlobalThreadPoolManager()
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class MCPSkillkit(Skillkit):
|
|
126
|
+
"""MCP Skill Suite - Connection Pool Fix Version"""
|
|
127
|
+
|
|
128
|
+
# Class-level adapter cache to avoid repeated creation
|
|
129
|
+
_adapter_cache: Dict[str, MCPAdapter] = {}
|
|
130
|
+
_cache_lock = threading.RLock() # Add thread lock to protect cache
|
|
131
|
+
_instances: List["MCPSkillkit"] = [] # Track all instances
|
|
132
|
+
_cleanup_registered = False
|
|
133
|
+
|
|
134
|
+
def __init__(self):
    """Initialize the MCP skill suite.

    Registers this instance in the class-level instance list and, the first
    time any instance is created, installs a single atexit handler that
    shuts down every live instance at interpreter exit.
    """
    super().__init__()
    self.skills_cache: List[SkillFunction] = []  # skills built from MCP tools
    self.server_configs: Dict[str, MCPServerConfig] = {}  # name -> loaded server config
    self.initialized = False

    # Register this instance so the atexit handler can clean it up later.
    with self._cache_lock:
        self._instances.append(self)

        # Register the cleanup handler exactly once per process.
        if not self._cleanup_registered:
            atexit.register(self._cleanup_all_instances)
            # BUG FIX: assign on the class, not the instance. The original
            # ``self._cleanup_registered = True`` created an *instance*
            # attribute, leaving the class flag False so every new instance
            # registered another atexit handler.
            MCPSkillkit._cleanup_registered = True
|
|
149
|
+
|
|
150
|
+
@classmethod
def _cleanup_all_instances(cls):
    """Shut down every registered instance; safe to run from atexit.

    Because this can run during interpreter teardown, the class lock is
    taken non-blocking: if it is unavailable, the instance list is read
    without clearing it rather than risking a deadlock.
    """
    try:
        logger.debug("Starting instance cleanup (atexit context)")

        # Use a non-blocking lock in atexit context to avoid deadlocks.
        lock_acquired = False
        try:
            lock_acquired = cls._cache_lock.acquire(blocking=False)
            if lock_acquired:
                # Snapshot and clear under the lock so instances are not
                # cleaned up twice.
                instances_to_cleanup = list(cls._instances)
                cls._instances.clear()
            else:
                # Lock unavailable: copy the list as-is (best effort,
                # may race with concurrent registration).
                logger.debug("Could not acquire lock in atexit, using fallback")
                instances_to_cleanup = (
                    list(cls._instances) if hasattr(cls, "_instances") else []
                )

            # Shut each instance down; one failure must not stop the rest.
            for instance in instances_to_cleanup:
                try:
                    instance.shutdown()
                except Exception as e:
                    logger.warning(f"Error shutting down instance: {e}")

        finally:
            if lock_acquired:
                cls._cache_lock.release()

        logger.debug("Instance cleanup completed")
    except Exception as e:
        # Never propagate out of an atexit handler.
        logger.error(f"Error during cleanup: {e}")
|
|
184
|
+
|
|
185
|
+
def getName(self) -> str:
    """Return the registry name under which this skillkit is known."""
    skillkit_name = "mcp_skillkit"
    return skillkit_name
|
|
187
|
+
|
|
188
|
+
def setGlobalConfig(self, globalConfig):
    """Store the global configuration and, when its MCP section exists and
    is enabled, eagerly initialize all configured MCP skills."""
    super().setGlobalConfig(globalConfig)

    mcp_config = getattr(globalConfig, "mcp_config", None)
    if mcp_config and mcp_config.enabled:
        self._initialize_mcp_skills()
|
|
198
|
+
|
|
199
|
+
def _initialize_mcp_skills(self):
    """Build skills for every enabled MCP server in the global config.

    Iterates ``globalConfig.mcp_config.servers``; for each server that is
    both enabled and allowed by ``skill_config``, creates (or reuses) a
    cached MCPAdapter and loads its tools into ``self.skills_cache``.
    A failure in one server is logged and skipped — it does not abort the
    remaining servers.

    NOTE: several debug/error log messages below are Chinese runtime
    strings and are intentionally left unchanged.
    """
    logger.debug("开始初始化MCP技能...")
    try:
        mcp_config = self.globalConfig.mcp_config
        skill_config = self.globalConfig.skill_config

        logger.debug(f"发现 {len(mcp_config.servers)} 个服务器配置")

        for i, server_config in enumerate(mcp_config.servers):
            logger.debug(
                f"处理服务器 {i + 1}/{len(mcp_config.servers)}: {server_config.name}"
            )

            # Skip servers disabled in their own configuration.
            if not server_config.enabled:
                logger.debug(f"跳过禁用的服务器: {server_config.name}")
                continue

            # Check whether this MCP server should be loaded
            # (global skill configuration may exclude it).
            should_load = skill_config.should_load_mcp_server(server_config.name)
            logger.debug(f"服务器 {server_config.name} 是否应该加载: {should_load}")
            if not should_load:
                logger.debug(
                    f"Skipping MCP server {server_config.name} (disabled by configuration)"
                )
                continue

            try:
                logger.debug(f"开始初始化服务器: {server_config.name}")

                # Save server configuration for later status/stat queries.
                self.server_configs[server_config.name] = server_config
                logger.debug(f"保存服务器配置: {server_config.name}")

                # Create and cache adapter (singleton per server name) —
                # guarded by the class lock for thread safety.
                with self._cache_lock:
                    if server_config.name not in self._adapter_cache:
                        logger.debug(f"创建适配器: {server_config.name}")
                        adapter = MCPAdapter(server_config)
                        self._adapter_cache[server_config.name] = adapter
                        logger.debug(
                            f"Created MCP adapter for server: {server_config.name}"
                        )
                    else:
                        logger.debug(f"使用已存在的适配器: {server_config.name}")

                # Load the server's tools into skills_cache.
                logger.debug(f"即将加载服务器 {server_config.name} 的工具...")
                self._load_tools_for_server(server_config)
                logger.debug(f"完成加载服务器 {server_config.name} 的工具")

            except Exception as e:
                import traceback

                # Per-server failure: log and continue with the next server.
                logger.error(
                    f"Failed to initialize server {server_config.name}: {e}"
                )
                logger.error(f"完整错误信息: {traceback.format_exc()}")
                continue

        self.initialized = True
        logger.debug(
            f"MCP skillkit initialized with {len(self.server_configs)} servers, {len(self.skills_cache)} skills"
        )

    except Exception as e:
        import traceback

        logger.error(f"Failed to initialize MCP skillkit: {e}")
        logger.error(f"初始化failed完整错误: {traceback.format_exc()}")
|
|
269
|
+
|
|
270
|
+
def _load_tools_for_server(self, server_config: MCPServerConfig):
    """Fetch the tool list from one server and wrap each tool as a skill.

    Looks up the server's cached adapter, synchronously retrieves its
    tools, and appends one SkillFunction per tool to ``self.skills_cache``.
    All failures (missing adapter, fetch error, per-tool creation error)
    are logged rather than raised.
    """
    try:
        # Look up the cached adapter under the class lock (thread safe).
        with self._cache_lock:
            adapter = self._adapter_cache.get(server_config.name)
            if not adapter:
                raise Exception(
                    f"No adapter found for server: {server_config.name}"
                )

        logger.debug(f"开始加载服务器 {server_config.name} 的工具...")

        # Get tools using the synchronous bridge over the async adapter.
        tools = self._get_tools_sync(adapter)

        logger.debug(f"从服务器 {server_config.name} Get到 {len(tools)} 个工具")

        if len(tools) == 0:
            logger.warning(f"服务器 {server_config.name} 没有返回任何工具")
            return

        console(f"📝 MCP Skill registered: {server_config.name}")

        # Create one skill per tool; a bad tool must not block the rest.
        created_skills = 0
        for tool in tools:
            try:
                skill_func, custom_schema = self._create_skill_function(
                    server_config.name, tool
                )
                # Wrap the callable with its custom OpenAI tool schema.
                self.skills_cache.append(SkillFunction(skill_func, custom_schema))
                logger.debug(f"Loaded tool: {tool['name']}")
                created_skills += 1
            except Exception as e:
                logger.error(f"创建技能failed {tool.get('name', 'unknown')}: {e}")

        logger.debug(
            f"successful加载 {created_skills}/{len(tools)} 个工具从服务器 {server_config.name}"
        )

    except Exception as e:
        import traceback

        logger.error(f"Failed to load tools for server {server_config.name}: {e}")
        logger.error(f"完整错误堆栈: {traceback.format_exc()}")
|
|
317
|
+
|
|
318
|
+
def _get_tools_sync(self, adapter: MCPAdapter) -> List[Dict[str, Any]]:
    """Synchronously fetch the adapter's tool list.

    The async ``get_available_tools_from_pool()`` coroutine is run on a
    worker thread from the global pool, using that thread's cached event
    loop, so this method can be called from synchronous code without
    touching the caller's event loop.

    Returns:
        The tool dictionaries reported by the server, or ``[]`` on
        timeout or error.
    """
    logger.debug(f"开始同步Get工具列表,适配器: {adapter.config.name}")
    try:
        # Runs inside a pool thread; uses that thread's cached loop.
        def run_async_in_thread():
            try:
                logger.debug(
                    f"在线程 {threading.current_thread().name} 中创建事件循环"
                )
                loop = get_or_create_event_loop()
                logger.debug("开始调用 adapter.get_available_tools_from_pool()")
                result = loop.run_until_complete(
                    adapter.get_available_tools_from_pool()
                )
                logger.debug(f"Get工具列表successful,返回 {len(result)} 个工具")
                return result

            except Exception as e:
                logger.error(f"Error in async thread: {e}")
                import traceback

                logger.error(f"Async thread traceback: {traceback.format_exc()}")
                raise

        # Submit to the shared pool instead of spawning a new executor
        # per call.
        global _global_thread_pool
        logger.debug("Get全局线程池")
        executor = _global_thread_pool.get_executor()
        logger.debug(f"提交任务到线程池,超时时间: {adapter.config.timeout}s")
        future = executor.submit(run_async_in_thread)
        logger.debug("等待任务执行结果...")
        # Bound the wait by the server's configured timeout.
        result = future.result(timeout=adapter.config.timeout)
        logger.debug(f"同步Get工具列表successful,返回 {len(result)} 个工具")
        return result

    except concurrent.futures.TimeoutError:
        logger.error(
            f"Timeout getting tools from {adapter.config.name} after {adapter.config.timeout}s"
        )
        return []
    except Exception as e:
        import traceback

        error_details = traceback.format_exc()
        logger.error(f"Error getting tools from {adapter.config.name}: {e}")
        logger.error(f"Full traceback: {error_details}")
        return []
|
|
367
|
+
|
|
368
|
+
async def _call_tool_async(
    self, adapter: MCPAdapter, tool_name: str, kwargs: Dict[str, Any]
) -> Any:
    """Invoke *tool_name* on *adapter*, reusing pooled connections.

    Errors are logged here and re-raised unchanged so callers can apply
    their own handling.
    """
    try:
        result = await adapter.call_tool_with_connection_reuse(tool_name, kwargs)
    except Exception as e:
        logger.error(f"Error calling tool: {e}")
        raise
    return result
|
|
377
|
+
|
|
378
|
+
def _create_skill_function(self, server_name: str, tool: Dict[str, Any]):
    """Build a synchronous skill callable (plus schema) for one MCP tool.

    The returned ``skill_func`` closes over *server_name* and the tool
    metadata; when called it looks up the cached adapter, runs the async
    tool invocation on a pool thread, and returns the result as a string
    (or an ``"Error ..."`` string on failure — it never raises).

    Returns:
        tuple: ``(skill_func, custom_schema)`` where ``custom_schema`` is
        the OpenAI tool schema for the function.
    """
    tool_name = tool["name"]
    tool_description = tool["description"]
    tool_parameters = tool.get("parameters", {})

    def skill_func(**kwargs) -> str:
        """MCP Skill Function - Simplified Asynchronous Call Strategy

        This function adopts a more robust approach to handling asynchronous calls, avoiding complex event loop management
        """
        try:
            # Look up the cached adapter (thread-safe); adapters are
            # created once per server during initialization.
            with self._cache_lock:
                adapter = self._adapter_cache.get(server_name)
                if adapter is None:
                    return f"Error: Adapter not found for server {server_name}"

            # The actual async tool invocation, delegated to the shared
            # connection-reusing helper.
            async def async_call():
                return await self._call_tool_async(adapter, tool_name, kwargs)

            # Always dispatch to a worker thread with its own cached event
            # loop — avoids conflicts with any loop running in the caller.
            def run_async_in_thread():
                """Run the async call on this worker thread's event loop."""
                try:
                    loop = get_or_create_event_loop()
                    return loop.run_until_complete(async_call())
                except Exception as e:
                    logger.debug(
                        f"Error in async thread for {server_name}.{tool_name}: {e}"
                    )
                    raise

            # Submit to the shared global pool rather than spawning
            # a new executor per call.
            global _global_thread_pool
            executor = _global_thread_pool.get_executor()

            # Bound the wait: at least 60s, or twice the server's
            # configured timeout, whichever is larger.
            timeout = max(
                60, adapter.config.timeout * 2
            )  # At least 60 seconds, or twice the configured timeout

            future = executor.submit(run_async_in_thread)
            result = future.result(timeout=timeout)

            # Skills return plain strings to the LLM layer.
            return str(result)

        except Exception as e:
            import traceback

            error_details = traceback.format_exc()
            logger.debug(
                f"MCP tool execution failed: {server_name}.{tool_name}, error: {e}"
            )
            logger.debug(f"Full traceback: {error_details}")
            # Never raise out of a skill; report the error as the result.
            return f"Error executing {server_name}.{tool_name}: {str(e)}"

    # Give the callable a stable, unique name for registration/logging.
    skill_func.__name__ = f"{server_name}_{tool_name}"

    # Generate a detailed docstring from the tool's own description and
    # parameter metadata (helper defined elsewhere in this module).
    docstring = self._generate_detailed_docstring(tool_description, tool_parameters)
    skill_func.__doc__ = docstring

    # Build the custom OpenAI tool schema describing this function.
    custom_schema = self._create_openai_tool_schema(
        server_name, tool_name, tool_description, tool_parameters
    )

    return skill_func, custom_schema
|
|
450
|
+
|
|
451
|
+
def _createSkills(self) -> List[SkillFunction]:
    """Expose the MCP skills discovered during initialization.

    Returns ``self.skills_cache``, which was filled while servers were
    loaded; the base class subsequently binds ``owner_skillkit`` on each
    of these skills.
    """
    cached_skills = self.skills_cache
    return cached_skills
|
|
458
|
+
|
|
459
|
+
def shutdown(self):
    """Release this instance's resources; safe to call from atexit.

    Cleanup is deliberately best-effort and non-blocking: the cache lock
    is taken without blocking, and every step is wrapped so a failure in
    one never prevents the others. Never raises.

    NOTE(review): this also shuts down the *process-wide* adapter
    connection pool and thread pool, which affects other live
    MCPSkillkit instances — confirm this is intended outside of
    interpreter-exit cleanup.
    """
    try:
        logger.debug("Starting simplified MCP skillkit shutdown")

        # Step 1: clear caches without risking a deadlock.
        try:
            lock_acquired = False
            try:
                lock_acquired = self._cache_lock.acquire(blocking=False)
                if lock_acquired:
                    self._adapter_cache.clear()
                    # Deregister this instance from the atexit list.
                    if self in self._instances:
                        self._instances.remove(self)
                else:
                    # Lock unavailable: skip cache cleanup rather than block.
                    logger.warning(
                        "Could not acquire cache lock during shutdown, skipping cache cleanup"
                    )
            finally:
                if lock_acquired:
                    try:
                        self._cache_lock.release()
                    except Exception:
                        pass

            # Step 2: drop per-instance state.
            self.server_configs.clear()
            self.skills_cache.clear()
            self.initialized = False

        except Exception as e:
            logger.warning(f"Error during resource cleanup: {e}")

        # Step 3: close pooled MCP connections (class-level, shared).
        try:
            from .mcp_adapter import MCPAdapter

            MCPAdapter.cleanup_connections()
        except Exception as e:
            logger.warning(f"Connection cleanup failed: {e}")

        # Step 4: shut down the shared worker thread pool.
        try:
            global _global_thread_pool
            _global_thread_pool.shutdown()
        except Exception as e:
            logger.warning(f"Thread pool shutdown failed: {e}")

        logger.debug("MCP skillkit shut down successfully")

    except Exception as e:
        # Do not raise exceptions in shutdown, especially when called by atexit.
        logger.debug(f"Error during shutdown: {e}")
|
|
515
|
+
|
|
516
|
+
def __del__(self):
    """Finalizer: best-effort shutdown of an initialized skillkit.

    Never raises — exceptions escaping a finalizer would only be printed
    to stderr during interpreter teardown.
    """
    try:
        needs_cleanup = self.initialized
    except Exception:
        # __init__ may not have completed; nothing to clean up.
        return
    if not needs_cleanup:
        return
    try:
        self.shutdown()
    except Exception:
        pass
|
|
524
|
+
|
|
525
|
+
def get_connection_status(self) -> Dict[str, Any]:
    """Return per-server connection status from the adapter layer,
    enriched with each server's local configuration.

    Returns:
        A mapping of server name to status info, or ``{}`` when the
        status cannot be collected.
    """
    try:
        from .mcp_adapter import MCPAdapter

        status = MCPAdapter.get_connection_status()

        # Attach this skillkit's configuration for every server the
        # adapter layer reported on.
        for name, cfg in self.server_configs.items():
            if name not in status:
                continue
            status[name]["server_config"] = {
                "command": cfg.command,
                "timeout": cfg.timeout,
                "enabled": cfg.enabled,
            }

        return status
    except Exception as e:
        logger.debug(f"Error getting connection status: {e}")
        return {}
|
|
545
|
+
|
|
546
|
+
def get_performance_stats(self) -> Dict[str, Any]:
    """Collect runtime statistics: configured/enabled servers, cached
    skills, and a per-server breakdown of the shared connection pool.

    Returns an empty dict if anything goes wrong while gathering stats.
    """
    try:
        global _connection_pool

        stats: Dict[str, Any] = {
            "total_servers": len(self.server_configs),
            "enabled_servers": sum(
                1 for cfg in self.server_configs.values() if cfg.enabled
            ),
            "initialized": self.initialized,
            "total_skills": len(self.skills_cache),
            "connection_pool_stats": {},
        }

        # One entry per server currently tracked by the pool.
        for server_name, connections in _connection_pool.pool.items():
            entry: Dict[str, Any] = {
                "total_connections": len(connections),
                "active_connections": sum(
                    1 for conn in connections if conn.get("in_use")
                ),
                "invalid_connections": sum(
                    1 for conn in connections if conn.get("invalid")
                ),
                "oldest_connection_age": None,
            }

            if connections:
                # Age measured on the event-loop clock — presumably the
                # same clock that stamped "created_at"; verify against
                # the pool implementation.
                now = asyncio.get_event_loop().time()
                earliest = min(
                    conn.get("created_at", now) for conn in connections
                )
                entry["oldest_connection_age"] = now - earliest

            stats["connection_pool_stats"][server_name] = entry

        return stats
    except Exception as e:
        logger.debug(f"Error getting performance stats: {e}")
        return {}
def test_connections(self) -> Dict[str, bool]:
|
|
590
|
+
"""Test all connections"""
|
|
591
|
+
results = {}
|
|
592
|
+
# Get a snapshot of the adapter to avoid modification during iteration
|
|
593
|
+
with self._cache_lock:
|
|
594
|
+
adapters_snapshot = dict(self._adapter_cache)
|
|
595
|
+
|
|
596
|
+
for server_name, adapter in adapters_snapshot.items():
|
|
597
|
+
try:
|
|
598
|
+
# Test connection using global thread pool
|
|
599
|
+
def test_connection_sync():
|
|
600
|
+
loop = get_or_create_event_loop()
|
|
601
|
+
return loop.run_until_complete(adapter.test_connection())
|
|
602
|
+
|
|
603
|
+
global _global_thread_pool
|
|
604
|
+
executor = _global_thread_pool.get_executor()
|
|
605
|
+
future = executor.submit(test_connection_sync)
|
|
606
|
+
results[server_name] = future.result(timeout=10)
|
|
607
|
+
|
|
608
|
+
except Exception as e:
|
|
609
|
+
logger.debug(f"Error testing connection for {server_name}: {e}")
|
|
610
|
+
results[server_name] = False
|
|
611
|
+
|
|
612
|
+
return results
|
|
613
|
+
|
|
614
|
+
def _generate_detailed_docstring(
    self, tool_description: str, tool_parameters: Dict[str, Any]
) -> str:
    """Generate a detailed docstring for a proxied MCP tool.

    Builds a Google-style docstring from the tool description plus its
    JSON-schema parameters. When the schema carries no usable
    ``properties``, falls back to keyword heuristics on the description
    text to supply a plausible ``Args`` section for common tool shapes.

    Args:
        tool_description: Human-readable description of the tool.
        tool_parameters: JSON-schema-like dict; ``properties`` and
            ``required`` are consulted when present.

    Returns:
        str: The assembled docstring text.
    """
    # JSON-schema type -> Python type name. Hoisted out of the parameter
    # loop (the original rebuilt this dict on every iteration).
    type_mapping = {
        "string": "str",
        "integer": "int",
        "number": "float",
        "boolean": "bool",
        "array": "list",
        "object": "dict",
    }

    docstring_parts = [tool_description]

    # A schema is usable only if it has a non-empty "properties" mapping.
    has_valid_params = (
        tool_parameters
        and "properties" in tool_parameters
        and tool_parameters["properties"]
    )

    if has_valid_params:
        docstring_parts.append("\nArgs:")

        properties = tool_parameters["properties"]
        required_params = tool_parameters.get("required", [])

        for param_name, param_info in properties.items():
            raw_type = param_info.get("type", "Any")
            param_type = type_mapping.get(raw_type, raw_type)

            # Fall back to a generic description when none is provided.
            param_desc = param_info.get("description", "")
            if not param_desc:
                param_desc = f"The {param_name} parameter"

            # Mark requiredness so callers know what they must pass.
            if param_name in required_params:
                param_desc += " (required)"
            else:
                param_desc += " (optional)"

            docstring_parts.append(f"    {param_name} ({param_type}): {param_desc}")
    else:
        # No schema available: guess a sensible Args section from keywords.
        # NOTE: this inspects the *description* text, not the tool's name.
        desc_lower = tool_description.lower()
        if "fetch" in desc_lower or "url" in desc_lower:
            docstring_parts.append("\nArgs:")
            docstring_parts.append(
                "    url (str): The URL to fetch content from (required)"
            )
        elif "search" in desc_lower:
            docstring_parts.append("\nArgs:")
            docstring_parts.append("    query (str): The search query (required)")
        elif "file" in desc_lower and ("read" in desc_lower or "get" in desc_lower):
            docstring_parts.append("\nArgs:")
            docstring_parts.append(
                "    path (str): The file path to read (required)"
            )
        elif "file" in desc_lower and (
            "write" in desc_lower or "create" in desc_lower
        ):
            docstring_parts.append("\nArgs:")
            docstring_parts.append(
                "    path (str): The file path to write to (required)"
            )
            docstring_parts.append(
                "    content (str): The content to write (required)"
            )
        elif "directory" in desc_lower or "folder" in desc_lower:
            docstring_parts.append("\nArgs:")
            docstring_parts.append("    path (str): The directory path (required)")
        else:
            # Unknown shape: do not fabricate an Args section that could
            # mislead downstream docstring parsers.
            docstring_parts.append("\nNote:")
            docstring_parts.append(
                "    This tool may accept various parameters. Please refer to the tool documentation."
            )

    # All tool invocations return their result as a string.
    docstring_parts.append("\nReturns:")
    docstring_parts.append("    str: Tool execution result")

    return "\n".join(docstring_parts)
def _create_openai_tool_schema(
    self,
    server_name: str,
    tool_name: str,
    tool_description: str,
    tool_parameters: Dict[str, Any],
) -> Dict[str, Any]:
    """Build an OpenAI function-calling tool schema for an MCP tool.

    The function name is namespaced as ``"<server>_<tool>"`` so tools from
    different servers cannot collide. Parameter definitions are copied
    from the MCP declaration; unknown JSON-schema types are coerced to
    ``"string"``.

    Args:
        server_name: Name of the MCP server providing the tool.
        tool_name: The tool's own name.
        tool_description: Human-readable tool description.
        tool_parameters: JSON-schema-like dict with optional
            ``properties`` / ``required`` keys.

    Returns:
        Dict[str, Any]: A ``{"type": "function", "function": {...}}``
        schema suitable for the OpenAI tools API.
    """
    # JSON-schema primitive types accepted verbatim; anything else falls
    # back to "string". Hoisted out of the loop (the original rebuilt an
    # identity mapping dict on every parameter).
    valid_types = {"string", "integer", "number", "boolean", "array", "object"}

    function_name = f"{server_name}_{tool_name}"

    # Build the parameter schema.
    parameters_schema: Dict[str, Any] = {"type": "object", "properties": {}}
    required_params = []

    if (
        tool_parameters
        and "properties" in tool_parameters
        and tool_parameters["properties"]
    ):
        properties = tool_parameters["properties"]
        required_list = tool_parameters.get("required", [])

        for param_name, param_info in properties.items():
            param_type = param_info.get("type", "string")
            if param_type not in valid_types:
                param_type = "string"
            param_desc = param_info.get(
                "description", f"The {param_name} parameter"
            )

            parameters_schema["properties"][param_name] = {
                "type": param_type,
                "description": param_desc,
            }

            if param_name in required_list:
                required_params.append(param_name)

    # OpenAI expects "required" to be present only when non-empty.
    if required_params:
        parameters_schema["required"] = required_params

    return {
        "type": "function",
        "function": {
            "name": function_name,
            "description": tool_description,
            "parameters": parameters_schema,
        },
    }
@classmethod
def clear_adapter_cache(cls):
    """Drop every cached adapter; intended for tests and full resets."""
    # Clear in place under the lock so concurrent readers never observe
    # a half-cleared cache and existing aliases stay valid.
    with cls._cache_lock:
        cls._adapter_cache.clear()