gomyck-tools 1.3.4__py3-none-any.whl → 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ctools/ai/__init__.py ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/5/22 15:56'
ctools/ai/env_config.py ADDED
@@ -0,0 +1,33 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/5/16 16:42'
+
+ import json
+ import os
+ from typing import Any
+
+ from dotenv.main import DotEnv
+
+
+ class Configuration:
+     """Manages configuration and environment variables for the MCP client."""
+
+     def __init__(self, dotenv_path: str = ".env") -> None:
+         """Initialize configuration with environment variables."""
+         if not os.path.exists(dotenv_path): raise FileNotFoundError(f"Could not find .env file at {dotenv_path}")
+         self.env = DotEnv(dotenv_path=dotenv_path)
+
+     def get_env(self, key: str) -> str:
+         value = self.env.get(key)
+         return value if value else os.getenv(key)
+
+     def get_llm_api_key(self) -> str:
+         api_key = self.get_env("LLM_API_KEY")
+         if not api_key: raise ValueError("LLM_API_KEY not found in environment variables")
+         return api_key
+
+     def get_mcp_server_config(self) -> dict[str, Any]:
+         with open(self.get_env("MCP_CONFIG_PATH"), "r") as f:
+             return json.load(f)
+
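For orientation, a minimal usage sketch of `Configuration` (it assumes a local `.env` file defining `LLM_API_KEY` and `MCP_CONFIG_PATH`; the module path `ctools.ai.env_config` is inferred from the commented-out import in `llm_client.py` below):

from ctools.ai.env_config import Configuration  # inferred module path

config = Configuration(dotenv_path=".env")   # raises FileNotFoundError if the file is missing
api_key = config.get_llm_api_key()           # .env value first, falling back to the process environment
mcp_config = config.get_mcp_server_config()  # loads the JSON file that MCP_CONFIG_PATH points to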
ctools/ai/llm_chat.py ADDED
@@ -0,0 +1,168 @@
+ from typing import Callable
+
+ from ctools import sys_log
+ from ctools.ai.llm_client import LLMClient
+ from ctools.ai.mcp.mcp_client import MCPClient, res_has_img, is_image_content, get_tools_prompt, mcp_tool_call
+ from ctools.ai.tools.xml_extract import extract_all_xml_blocks
+ from ctools.ai.tools.think_process import remove_think_blocks
+
+ log = sys_log.flog
+
+ continue_prompt_default = """
+ 1. Continue processing the content that has not been finished yet, and skip everything that has already been processed.
+ 2. When calling tools, refer to the arguments of the previous tool call and adjust only the offset-related parameters, so that processing resumes where it left off.
+ 3. If you believe all data has been processed, output the marker: {end_flag}.
+ """
+
+ class ROLE:
+     ASSISTANT = "assistant"
+     USER = "user"
+     SYSTEM = "system"
+
+ def get_message_json(role_type: str, content):
+     return {"role": role_type, "content": content}
+
+ class ChatSession:
+
+     def __init__(self, prompts: str, llm_client: LLMClient, max_tools_call: int = 10, mcp_clients: list[MCPClient] = None,
+                  auto_complete: bool = False, end_flag: str = "EOF", continue_prompt: str = continue_prompt_default) -> None:
+         """
+         Initialize the chat session
+         :param prompts: system prompt
+         :param llm_client: LLM client
+         :param max_tools_call: maximum number of tool calls per conversation
+         :param mcp_clients: mcp_clients
+         :param auto_complete: whether to keep prompting until the model reports completion
+         :param end_flag: end-of-output marker
+         :param continue_prompt: continuation prompt used while auto-completing
+         """
+         self.mcp_clients: list[MCPClient] = mcp_clients
+         self.llm_client: LLMClient = llm_client
+         self.prompts: str = prompts
+         self.max_tools_call = max_tools_call
+         self.auto_complete = auto_complete
+         self.end_flag = end_flag
+         self.continue_prompt = continue_prompt.format(end_flag=self.end_flag)
+
+         self.current_message = {}
+         self.full_messages = []
+
+     async def init_prompts(self, user_system_prompt):
+         if self.mcp_clients:
+             if user_system_prompt:
+                 mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, user_system_prompt)
+             elif self.prompts:
+                 mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, self.prompts)
+             else:
+                 mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, "")
+             self.full_messages.append(get_message_json(ROLE.SYSTEM, mcp_tools_prompt))
+             # log.info(mcp_tools_prompt)
+         else:
+             if user_system_prompt:
+                 self.full_messages.append(get_message_json(ROLE.SYSTEM, user_system_prompt))
+             elif self.prompts:
+                 self.full_messages.append(get_message_json(ROLE.SYSTEM, self.prompts))
+
+     async def chat(self, user_input: list[dict], get_call_id: Callable[[], str] = lambda: "None", get_event_msg_func: Callable = None, get_full_msg_func: Callable = None):
+         """
+         Run a conversation
+         Parameters
+         ----------
+         user_input user messages, e.g. [{"role": "user", "content": "hello"}]
+         get_call_id func returning the ID of this conversation
+         get_event_msg_func(get_call_id(), role, msg) receives real-time (streaming) replies
+         get_full_msg_func(get_call_id(), is_final, msg) receives the complete answer list
+         -------
+         """
+         # pull out the user-supplied system prompt, if any
+         user_system_prompt = user_input[0]["content"] if user_input[0]["role"] == "system" else ""
+         user_input = user_input[1:] if user_input[0]["role"] == "system" else user_input
+         await self.init_prompts(user_system_prompt)
+         try:
+             self.full_messages.extend(user_input)
+             last_user_input = next((item["content"] for item in reversed(user_input) if item.get("role") == "user"), None)
+             current_process_index = 0
+             max_tools_call = self.max_tools_call
+             final_resp = False
+             while (current_process_index < max_tools_call and not final_resp) or (self.auto_complete and current_process_index < 100):
+                 log.info("\nGenerating answer: %s", self.full_messages)
+                 res = []
+                 async for chunk in self.llm_client.model_completion(self.full_messages):
+                     res.append(chunk)
+                     await self.process_chunk_message(chunk, get_call_id, get_event_msg_func)
+                 llm_response = "".join(res)
+                 log.info("\nReceived answer: %s", llm_response)
+                 no_think_llm_response = remove_think_blocks(llm_response)  # strip the think blocks before checking for EOF, in case EOF shows up inside a think block
+                 if self.end_flag in no_think_llm_response:
+                     self.full_messages.append(get_message_json(ROLE.ASSISTANT, llm_response.replace(self.end_flag, "")))  # drop the EOF marker
+                     current_process_index = 999
+                     final_resp = True
+                     await self.process_full_message(final_resp, get_call_id, get_full_msg_func)
+                 else:
+                     xml_blocks = extract_all_xml_blocks(llm_response)
+                     if xml_blocks:
+                         for xml_block in xml_blocks:
+                             tool_call_result = await mcp_tool_call(self.mcp_clients, xml_block)
+                             log.info("\nMCP call result: %s", tool_call_result)
+                             current_process_index += 1
+                             if tool_call_result == xml_block:
+                                 self.full_messages.append(get_message_json(ROLE.USER, "The tool call failed, please retry."))
+                             elif current_process_index == max_tools_call - 1:
+                                 await self.add_tool_call_res_2_message(last_user_input, tool_call_result)
+                                 self.full_messages.append(get_message_json(ROLE.USER, "The tool-call limit has been reached, please answer directly."))  # order matters, do not swap
+                             else:
+                                 self.full_messages.append(get_message_json(ROLE.ASSISTANT, llm_response))  # order matters, do not swap
+                                 await self.add_tool_call_res_2_message(last_user_input, tool_call_result)
+                             await self.process_tool_call_message(get_call_id, get_event_msg_func, tool_call_result)
+                         final_resp = False
+                     else:
+                         self.full_messages.append(get_message_json(ROLE.ASSISTANT, llm_response))
+                         if self.auto_complete: self.full_messages.append(get_message_json(ROLE.USER, self.continue_prompt))
+                         final_resp = True
+                         await self.process_full_message(final_resp, get_call_id, get_full_msg_func)
+         except Exception as e:
+             log.exception(e)
+             error_msg = 'A system error occurred, please retry~ {}'.format(e)
+             self.full_messages.append(get_message_json(ROLE.ASSISTANT, error_msg))
+             await self.process_error_message(error_msg, get_call_id, get_event_msg_func, get_full_msg_func)
+         finally:
+             return self.current_message
+
+     async def process_error_message(self, error_msg, get_call_id, get_event_msg_func, get_full_msg_func):
+         # send both the real-time event and the final-result notification to the frontend
+         self.current_message = error_msg
+         if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.ASSISTANT, self.current_message)
+         if get_full_msg_func: await get_full_msg_func(get_call_id(), True, self.full_messages)
+
+     async def process_chunk_message(self, chunk, get_call_id, get_event_msg_func):
+         # real-time notification to the frontend
+         self.current_message = chunk
+         if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.ASSISTANT, self.current_message)
+
+     async def process_tool_call_message(self, get_call_id, get_event_msg_func, tool_call_result):
+         # real-time notification to the frontend (tool calls get one extra notification); image results must be sent as a user message (the API rejects anything else), otherwise system (everything is user for now; revisit whether system is ever needed)
+         self.current_message = tool_call_result["result"] if res_has_img(tool_call_result) else tool_call_result
+         if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.USER, self.current_message)
+
+     async def process_full_message(self, final_resp, get_call_id, get_full_msg_func):
+         if get_full_msg_func: await get_full_msg_func(get_call_id(), final_resp, self.full_messages)
+
+     async def add_tool_call_res_2_message(self, last_user_input, tool_call_result: dict):
+         if not isinstance(tool_call_result, dict): return
+         response: list = tool_call_result.get("result")
+         image_content = []
+         for rep in response:
+             if not is_image_content(rep):
+                 self.full_messages.append(get_message_json(ROLE.USER, str(rep)))
+             else:
+                 image_content.append({
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f'data:{rep["mime_type"]};base64,{rep["data"]}'
+                     }
+                 })
+         if image_content:
+             image_content.append({
+                 "type": "text",
+                 "text": last_user_input
+             })
+             self.full_messages.append(get_message_json(ROLE.USER, image_content))
+
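A hedged sketch of driving `ChatSession` end to end (the callback bodies, call ID, and user message are illustrative, not part of the package):

import asyncio
from ctools.ai.llm_chat import ChatSession
from ctools.ai.llm_client import LLMClient

async def main():
    async def on_event(call_id, role, msg):          # streaming chunks and tool results arrive here
        print(call_id, role, msg)

    async def on_full(call_id, is_final, messages):  # the accumulated message list
        if is_final: print(messages)

    session = ChatSession(prompts="You are a helpful assistant.", llm_client=LLMClient(), mcp_clients=None)
    await session.chat([{"role": "user", "content": "hello"}],
                       get_call_id=lambda: "call-1",
                       get_event_msg_func=on_event,
                       get_full_msg_func=on_full)

asyncio.run(main())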
ctools/ai/llm_client.py ADDED
@@ -0,0 +1,133 @@
+ import logging
+ import os
+
+ import httpx
+
+ from ctools import sys_log, cjson
+
+ logging.getLogger("httpcore").setLevel(logging.WARNING)
+ logging.getLogger("httpx").setLevel(logging.WARNING)
+ logging.getLogger("mcp.client.sse").setLevel(logging.WARNING)
+
+ log = sys_log.flog
+
+ def process_SSE(line):
+     if not line: return None
+     if line.startswith("data: "):
+         data = line[6:]
+         if data == "[DONE]":
+             return "DONE"
+         return data
+
+ class LLMClient:
+     """Manages communication with the LLM provider."""
+
+     def __init__(self, api_key: str = os.getenv("LLM_API_KEY"),
+                  llm_url: str = "https://api.siliconflow.cn/v1/",
+                  model_name: str = "Qwen/Qwen3-235B-A22B",
+                  temperature: float = 1, stream: bool = True,
+                  thinking: bool = True,
+                  thinking_budget: int = 4096,
+                  max_tokens: int = 8192,
+                  top_p: float = 0.5,
+                  top_k: int = 50,
+                  frequency_penalty: float = 0.5
+                  ) -> None:
+         self.api_key = api_key
+         self.llm_url = llm_url
+         self.model_name = model_name
+         self.temperature = temperature
+         self.stream = stream
+         self.thinking = thinking
+         self.thinking_budget = thinking_budget
+         self.max_tokens = max_tokens
+         self.top_p = top_p
+         self.top_k = top_k
+         self.frequency_penalty = frequency_penalty
+
+     async def model_completion(self, messages: list[dict[str, str]]):
+         self.no_think_compatible(messages)
+         url = self.llm_url
+         headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {self.api_key}",
+         }
+         payload = {
+             "messages": messages,
+             "model": self.model_name,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+             "top_p": self.top_p,
+             "top_k": self.top_k,
+             "frequency_penalty": self.frequency_penalty,
+             "stream": self.stream,
+             "enable_thinking": self.thinking,
+             "thinking_budget": self.thinking_budget
+         }
+         try:
+             req_url = "chat/completions"
+             if self.stream:
+                 async with httpx.AsyncClient(timeout=None, base_url=url) as client:
+                     async with client.stream("POST", req_url, headers=headers, json=payload) as response:
+                         response.raise_for_status()
+                         # compatibility with the DeepSeek / Qwen reasoning chain: wrap streamed
+                         # reasoning_content in <think>...</think> tags so downstream code can strip it
+                         start_think: bool = False
+                         end_think: bool = False
+                         start_token: str = "<think>"
+                         end_token: str = "</think>"
+                         async for line in response.aiter_lines():
+                             data = process_SSE(line)
+                             if not data: continue
+                             if data == "DONE":
+                                 continue
+                             choice = cjson.loads(data)["choices"][0]
+                             if "message" in choice:
+                                 content = choice["message"]["content"]
+                             else:
+                                 content = choice["delta"].get("content", "")
+                             reasoning_content = choice.get("delta", {}).get("reasoning_content", "")
+                             if not start_think and not content and reasoning_content:
+                                 content = f"{start_token}{reasoning_content}"
+                                 start_think = True
+                             if not end_think and start_think and not reasoning_content:
+                                 content = f"{end_token}{content}"
+                                 end_think = True
+                             if not content: content = reasoning_content
+                             if not content: continue
+                             yield content
+             else:
+                 async with httpx.AsyncClient(timeout=None, base_url=url) as client:
+                     response = await client.post(req_url, headers=headers, json=payload)
+                     response.raise_for_status()
+                     content = response.json()["choices"][0]["message"]["content"]
+                     yield content
+         except httpx.HTTPError as e:  # HTTPError covers both RequestError and the HTTPStatusError raised by raise_for_status()
+             error_message = f"Error getting LLM response: {str(e)}"
+             log.error(error_message)
+             if isinstance(e, httpx.HTTPStatusError):
+                 status_code = e.response.status_code
+                 log.error(f"Status code: {status_code}")
+                 log.error(f"Response details: {e.response.text}")
+             yield f"I encountered an error: {error_message}. Please try again or rephrase your request."
+
+     def no_think_compatible(self, messages):
+         if not self.thinking and "qwen3" in self.model_name.lower():
+             for msg in messages:
+                 if (msg.get("role") == "user" or msg.get("role") == "system") and "/no_think" not in msg.get("content", ""):
+                     msg["content"] += " /no_think"
+
+ # if __name__ == '__main__':
+ #     import asyncio
+ #
+ #     from env_config import Configuration
+ #
+ #     config = Configuration()
+ #     # llm = LLMClient(config.get_llm_api_key(), llm_url="http://192.168.3.73:8000/v1/", stream=True, model_name="deepseek-r1:7b", thinking=False)
+ #     llm = LLMClient(config.get_llm_api_key(), stream=True, model_name="Qwen/Qwen3-32B", thinking=False)
+ #
+ #     async def main():
+ #         res = []
+ #         async for chunk in llm.model_completion([{"role": "user", "content": "Write a happy little story of about three hundred words"}]):
+ #             res.append(chunk)
+ #         print("".join(res))
+ #
+ #     asyncio.run(main())
+
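For reference, `process_SSE` only strips the `data: ` framing from a server-sent-events line; a sketch of the three cases (the JSON payload is illustrative):

process_SSE("")                          # -> None (blank keep-alive line)
process_SSE('data: {"choices": [...]}')  # -> '{"choices": [...]}'
process_SSE("data: [DONE]")              # -> "DONE" (end-of-stream sentinel)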
ctools/ai/mcp/__init__.py ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/5/22 15:56'
ctools/ai/mcp/mcp_client.py ADDED
@@ -0,0 +1,305 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/5/16 16:49'
+
+ import asyncio
+ import json
+ import shutil
+ from collections.abc import AsyncIterator
+ from contextlib import AsyncExitStack, asynccontextmanager
+ from typing import Any
+
+ from mcp import ClientSession, StdioServerParameters
+ from mcp.client.sse import sse_client
+ from mcp.client.stdio import stdio_client
+ from mcp.client.streamable_http import streamablehttp_client
+ from mcp.types import CallToolResult, TextContent, ImageContent
+
+ from ctools import sys_log
+ from ctools.ai.tools.tool_use_xml_parse import parse_tool_use
+
+ log = sys_log.flog
+
+ sys_prompt_4_mcp = """
+ 1. In this environment you have access to a set of tools you can use to answer the user's question.
+ 2. You can use one tool per message, and will receive the result of that tool use in the user's response.
+ 3. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+ 4. Before solving the task, break it down into clear, logical steps. List and number the steps first, then execute them one by one; there is no need to explain each step as you go. Do not skip any steps. Wait for confirmation before proceeding to the next step, if needed.
+
+ ## Tool Use Formatting
+
+ Tool use is formatted using XML-style tags. The tool name is enclosed in opening and closing tags, and each parameter is similarly enclosed within its own set of tags. Here's the structure:
+ <tool_use>
+ <name>{{tool_name}}</name>
+ <arguments>{{json_arguments}}</arguments>
+ </tool_use>
+
+ The tool name should be the exact name of the tool you are using, and the arguments should be a JSON object containing the parameters required by that tool. For example:
+
+ <tool_use>
+ <name>say_hello</name>
+ <arguments>
+ {{
+ "content": "你好"
+ }}
+ </arguments>
+ </tool_use>
+
+ The result should be a string, which can represent a file or any other output type. You can use this result as input for the next action.
+
+ Always adhere to this format for the tool use to ensure proper parsing and execution.
+
+ ## Available Tools
+ The example above uses notional tools that might not exist for you. You only have access to these tools:
+ {tools_description}
+
+ ## Tool Use Rules
+ Here are the rules you should always follow to solve your task:
+ 1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
+ 2. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself.
+ 3. If no tool call is needed, just answer the question directly.
+ 4. Never re-do a tool call that you previously did with the exact same parameters.
+ 5. For tool use, MAKE SURE to use the XML tag format shown in the examples above. Do not use any other format.
+ 6. Never escape Unicode in tool arguments; plain text is the default, so do not convert Chinese characters to Unicode escape sequences.
+
+ # User Instructions
+ {user_system_prompt}
+
+ Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
+ """
+
+ tools_use_example = """
+ Here are a few examples using notional tools:
+
+ ---
+ User: "What is the result of the following operation: 5 + 3 + 1294.678?"
+
+ Assistant: I can use the python_interpreter tool to calculate the result of the operation.
+ <tool_use>
+ <name>python_interpreter</name>
+ <arguments>{"code": "5 + 3 + 1294.678"}</arguments>
+ </tool_use>
+
+ User: {
+ "tool_name": "python_interpreter",
+ "result": ["1302.678"]
+ }
+
+ Assistant: The result of the operation is 1302.678.
+
+ ---
+ User: "Which city has the higher population, Guangzhou or Shanghai?"
+
+ Assistant: I can use the search tool to find the population of Guangzhou.
+ <tool_use>
+ <name>search</name>
+ <arguments>{"query": "Population Guangzhou"}</arguments>
+ </tool_use>
+
+ User: {
+ "tool_name": "search",
+ "result": ["Guangzhou has a population of 15 million inhabitants as of 2021."]
+ }
+
+ Assistant: I can use the search tool to find the population of Shanghai.
+ <tool_use>
+ <name>search</name>
+ <arguments>{"query": "Population Shanghai"}</arguments>
+ </tool_use>
+
+ User: {
+ "tool_name": "search",
+ "result": ["26 million (2019)"]
+ }
+ Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the higher population.
+ """
+
+ async def get_tools_prompt(mcp_clients, user_system_prompt) -> str:
+     all_tools = []
+     for client in mcp_clients:
+         tools = await client.list_server_tools()
+         all_tools.extend(tools)
+     return sys_prompt_4_mcp.format(tools_description="\n".join([tool.format_for_llm() for tool in all_tools]), user_system_prompt=user_system_prompt)
+
+ class Tool:
+
+     def __init__(self, name: str, description: str, input_schema: dict[str, Any]) -> None:
+         self.name: str = name
+         self.description: str = description
+         self.input_schema: dict[str, Any] = input_schema
+
+     def format_for_llm(self) -> str:
+         args_desc = []
+         if "properties" in self.input_schema:
+             for param_name, param_info in self.input_schema["properties"].items():
+                 arg_desc = f"- {param_name}({param_info.get('type', 'Any')}): {param_info.get('description', '')}"
+                 if param_name in self.input_schema.get("required", []):
+                     arg_desc += " (required)"
+                 args_desc.append(arg_desc)
+         return f"""
+ Tool: {self.name}
+ Description: {self.description}
+ Args_Info:
+ {chr(10).join(args_desc)}
+ """
+
+ class MCPClient:
+
+     def __init__(self, name: str, config: dict[str, Any]) -> None:
+         self.name: str = name
+         self.config: dict[str, Any] = config
+         self.stdio_context: Any | None = None
+         self.session: ClientSession | None = None
+         self.exit_stack: AsyncExitStack = AsyncExitStack()
+         self.tools = []
+
+     async def initialize(self) -> None:
+         if self.config.get('server_type') is None or self.config.get('server_type') == 'stdio':
+             command = (shutil.which("npx") if self.config["command"] == "npx" else self.config["command"])
+             if command is None: raise ValueError("The command must be a valid string and cannot be None.")
+             server_params = StdioServerParameters(
+                 command=command,
+                 args=self.config["args"],
+                 env=self.config["env"] if self.config.get("env") else None,
+             )
+             stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
+             read, write = stdio_transport
+             self.session = await self.exit_stack.enter_async_context(ClientSession(read, write))
+             await self.session.initialize()
+         elif self.config['server_type'] == 'SSE':
+             sse_transport = await self.exit_stack.enter_async_context(sse_client(
+                 url=self.config["url"],
+                 headers=self.config["headers"],
+                 timeout=self.config["timeout"],
+                 sse_read_timeout=self.config["sse_read_timeout"]))
+             read, write = sse_transport
+             self.session = await self.exit_stack.enter_async_context(ClientSession(read, write))
+             await self.session.initialize()
+         elif self.config['server_type'] == 'Streamable HTTP':
+             stream_transport = await self.exit_stack.enter_async_context(streamablehttp_client(
+                 url=self.config["url"],
+                 headers=self.config["headers"],
+                 timeout=self.config["timeout"],
+                 sse_read_timeout=self.config["sse_read_timeout"]))
+             read, write, session_id = stream_transport
+             self.session = await self.exit_stack.enter_async_context(ClientSession(read, write))
+             await self.session.initialize()
+
+     async def list_server_tools(self) -> list[Any]:
+         if not self.session: raise RuntimeError(f"Server {self.name} not initialized")
+         if self.tools: return self.tools
+         tools_response = await self.session.list_tools()
+         for item in tools_response:
+             if isinstance(item, tuple) and item[0] == "tools":
+                 self.tools.extend(Tool(tool.name, tool.description, tool.inputSchema) for tool in item[1])
+         return self.tools
+
+     async def execute_tool(
+         self,
+         tool_name: str,
+         arguments: dict[str, Any],
+         retries: int = 3,
+         delay: float = 1.0,
+     ) -> Any:
+         if not self.session: raise RuntimeError(f"Server {self.name} not initialized")
+         attempt = 0
+         args = arguments
+         while attempt < retries:
+             try:
+                 log.info(f"Executing {tool_name}...")
+                 result = await self.session.call_tool(tool_name, args)
+                 return result
+             except Exception as e:
+                 attempt += 1
+                 log.warning(f"Error executing tool: {e}. Attempt {attempt} of {retries}.")
+                 if attempt < retries:
+                     log.info(f"Retrying in {delay} seconds...")
+                     await asyncio.sleep(delay)
+                 else:
+                     log.error("Max retries reached. Failing.")
+                     raise
+
+     async def cleanup(self) -> None:
+         await self.exit_stack.aclose()
+         self.session = None
+         self.stdio_context = None
+         self.exit_stack = None
+
+
+ async def mcp_tool_call(mcp_clients: list[MCPClient], xml_info: str):
+     if not mcp_clients: return xml_info
+     final_result = {
+         "tool_name": "",
+         "result": []
+     }
+     try:
+         tool_call = parse_tool_use(xml_info)
+         if "tool" in tool_call and "arguments" in tool_call:
+             log.info(f"Executing tool: {tool_call['tool']} With arguments: {tool_call['arguments']}")
+             for client in mcp_clients:
+                 tools = await client.list_server_tools()
+                 if any(tool.name == tool_call["tool"] for tool in tools):
+                     final_result["tool_name"] = tool_call["tool"]
+                     try:
+                         result: CallToolResult = await client.execute_tool(tool_call["tool"], tool_call["arguments"])
+                         text_result = []
+                         image_result = []
+                         tools_call_content = result.content
+                         for content in tools_call_content:
+                             if isinstance(content, TextContent):
+                                 try:
+                                     text_result.append(json.loads(content.text))
+                                 except Exception:
+                                     text_result.append(content.text)
+                             elif isinstance(content, ImageContent):
+                                 image_result.append({"mime_type": content.mimeType, "data": content.data})
+                         text_result.extend(image_result)
+                         final_result["result"] = text_result
+                         return final_result
+                     except Exception as e:
+                         log.exception(e)
+                         error_msg = f"Error executing tool: {str(e)}"
+                         final_result["result"] = [error_msg]
+                         return final_result
+             return f"No server found with tool: {tool_call['tool']}"
+         return xml_info
+     except Exception as e:
+         log.exception(e)
+         error_msg = f"Error executing tool: {str(e)}"
+         final_result["result"] = [error_msg]
+         return final_result
+
+ def res_has_img(llm_response) -> bool:
+     if isinstance(llm_response, str): return False
+     response: list = llm_response.get("result")
+     for rep in response:
+         if is_image_content(rep):
+             return True
+     return False
+
+ def is_image_content(content: dict) -> bool:
+     try:
+         if content.get("mime_type") and content.get("data"):
+             return True
+         return False
+     except Exception:
+         return False
+
+ @asynccontextmanager
+ async def init_mcp_clients(mcp_server_config: dict[str, Any]) -> AsyncIterator[list[MCPClient]]:
+     mcp_clients = []
+     for name, sc in mcp_server_config["mcpServers"].items():
+         try:
+             mc = MCPClient(name, sc)
+             await mc.initialize()
+             mcp_clients.append(mc)
+         except Exception as e:
+             log.exception(f"Error initializing MCP servers: {e}")
+     yield mcp_clients
+     for client in mcp_clients:
+         try:
+             log.info("Cleaning up MCP client: %s", client.name)
+             await client.cleanup()
+         except Exception as e:
+             log.exception(f"Error unloading MCP servers: {e}")
+
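Putting the pieces together, a hedged sketch of wiring MCP clients into a chat session (the config layout follows the `mcpServers` key read by `init_mcp_clients`; the user message is illustrative):

import asyncio
from ctools.ai.env_config import Configuration
from ctools.ai.llm_chat import ChatSession
from ctools.ai.llm_client import LLMClient
from ctools.ai.mcp.mcp_client import init_mcp_clients

async def main():
    config = Configuration()
    # init_mcp_clients is an async context manager: servers are started on entry and cleaned up on exit
    async with init_mcp_clients(config.get_mcp_server_config()) as mcp_clients:
        session = ChatSession(prompts="", llm_client=LLMClient(api_key=config.get_llm_api_key()), mcp_clients=mcp_clients)
        await session.chat([{"role": "user", "content": "What tools do you have?"}])

asyncio.run(main())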
ctools/ai/tools/__init__.py ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/5/22 15:57'