gomyck-tools 1.3.7__py3-none-any.whl → 1.3.9__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (78)
  1. ctools/ai/env_config.py +2 -0
  2. ctools/ai/llm_chat.py +27 -26
  3. ctools/ai/llm_client.py +87 -66
  4. ctools/ai/llm_exception.py +17 -0
  5. ctools/ai/mcp/mcp_client.py +5 -1
  6. ctools/ai/tools/quick_tools.py +99 -0
  7. ctools/ai/tools/xml_extract.py +0 -1
  8. ctools/application.py +46 -44
  9. ctools/auto/__init__.py +4 -0
  10. ctools/{browser_element_tools.py → auto/browser_element.py} +3 -2
  11. ctools/{plan_area_tools.py → auto/plan_area.py} +3 -0
  12. ctools/{resource_bundle_tools.py → auto/resource_bundle.py} +3 -3
  13. ctools/{screenshot_tools.py → auto/screenshot.py} +3 -2
  14. ctools/{win_control.py → auto/win_control.py} +5 -2
  15. ctools/cid.py +18 -0
  16. ctools/cipher/__init__.py +4 -0
  17. ctools/{rsa.py → cipher/rsa.py} +2 -2
  18. ctools/database/__init__.py +4 -0
  19. ctools/{database.py → database/database.py} +4 -3
  20. ctools/geo/__init__.py +4 -0
  21. ctools/metrics.py +2 -2
  22. ctools/office/__init__.py +4 -0
  23. ctools/{word_fill.py → office/word_fill.py} +1 -1
  24. ctools/pools/__init__.py +4 -0
  25. ctools/similar.py +22 -0
  26. ctools/stream/__init__.py +4 -0
  27. ctools/{credis.py → stream/credis.py} +5 -4
  28. ctools/{mqtt_utils.py → stream/mqtt_utils.py} +13 -10
  29. ctools/sys_info.py +5 -4
  30. ctools/sys_log.py +32 -31
  31. ctools/util/__init__.py +4 -0
  32. ctools/{http_utils.py → util/http_util.py} +0 -1
  33. ctools/{snow_id.py → util/snow_id.py} +2 -2
  34. ctools/web/__init__.py +4 -0
  35. ctools/{aio_web_server.py → web/aio_web_server.py} +23 -4
  36. ctools/{bottle_web_base.py → web/bottle_web_base.py} +2 -2
  37. ctools/{download_tools.py → web/download_util.py} +3 -2
  38. ctools/web/params_util.py +42 -0
  39. ctools/{upload_tools.py → web/upload_util.py} +3 -2
  40. {gomyck_tools-1.3.7.dist-info → gomyck_tools-1.3.9.dist-info}/METADATA +1 -1
  41. gomyck_tools-1.3.9.dist-info/RECORD +82 -0
  42. ctools/bashPath.py +0 -13
  43. ctools/console.py +0 -55
  44. ctools/enums.py +0 -4
  45. ctools/excelOpt.py +0 -36
  46. ctools/imgDialog.py +0 -45
  47. ctools/obj.py +0 -20
  48. ctools/str_diff.py +0 -20
  49. ctools/string_tools.py +0 -85
  50. gomyck_tools-1.3.7.dist-info/RECORD +0 -76
  51. /ctools/{pacth.py → auto/pacth.py} +0 -0
  52. /ctools/{pty_tools.py → auto/pty_process.py} +0 -0
  53. /ctools/{win_canvas.py → auto/win_canvas.py} +0 -0
  54. /ctools/{date_utils.py → cdate.py} +0 -0
  55. /ctools/{aes_tools.py → cipher/aes_util.py} +0 -0
  56. /ctools/{b64.py → cipher/b64.py} +0 -0
  57. /ctools/{czip.py → cipher/czip.py} +0 -0
  58. /ctools/{sign.py → cipher/sign.py} +0 -0
  59. /ctools/{sm_tools.py → cipher/sm_util.py} +0 -0
  60. /ctools/{coord_trans.py → geo/coord_trans.py} +0 -0
  61. /ctools/{douglas_rarefy.py → geo/douglas_rarefy.py} +0 -0
  62. /ctools/{cword.py → office/cword.py} +0 -0
  63. /ctools/{word_fill_entity.py → office/word_fill_entity.py} +0 -0
  64. /ctools/{work_path.py → path_info.py} +0 -0
  65. /ctools/{process_pool.py → pools/process_pool.py} +0 -0
  66. /ctools/{thread_pool.py → pools/thread_pool.py} +0 -0
  67. /ctools/{ckafka.py → stream/ckafka.py} +0 -0
  68. /ctools/{cftp.py → util/cftp.py} +0 -0
  69. /ctools/{compile_tools.py → util/compile_util.py} +0 -0
  70. /ctools/{html_soup.py → util/html_soup.py} +0 -0
  71. /ctools/{images_tools.py → util/image_process.py} +0 -0
  72. /ctools/{api_result.py → web/api_result.py} +0 -0
  73. /ctools/{bottle_webserver.py → web/bottle_webserver.py} +0 -0
  74. /ctools/{bottle_websocket.py → web/bottle_websocket.py} +0 -0
  75. /ctools/{ctoken.py → web/ctoken.py} +0 -0
  76. {gomyck_tools-1.3.7.dist-info → gomyck_tools-1.3.9.dist-info}/WHEEL +0 -0
  77. {gomyck_tools-1.3.7.dist-info → gomyck_tools-1.3.9.dist-info}/licenses/LICENSE +0 -0
  78. {gomyck_tools-1.3.7.dist-info → gomyck_tools-1.3.9.dist-info}/top_level.txt +0 -0
ctools/ai/env_config.py CHANGED
@@ -20,6 +20,8 @@ class Configuration:

   def get_env(self, key: str) -> str:
     value = self.env.get(key)
+    if value and value.lower() == "true": return True
+    if value and value.lower() == "false": return False
     return value if value else os.getenv(key)

   def get_llm_api_key(self) -> str:
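
get_env now coerces the strings "true" and "false" (any casing) to real booleans before falling back to os.getenv, even though its annotation still reads -> str. A minimal usage sketch, assuming Configuration keeps its parsed .env values in the self.env dict (the keys below are made up):

from ctools.ai.env_config import Configuration

config = Configuration()
config.env = {"STREAM": "True", "MODE": "fast"}  # poked in directly for illustration
assert config.get_env("STREAM") is True          # "true"/"false" now coerce to bool
assert config.get_env("MODE") == "fast"          # other values still come back as str
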
ctools/ai/llm_chat.py CHANGED
@@ -1,8 +1,10 @@
 from ctools import sys_log
 from ctools.ai.llm_client import LLMClient
+from ctools.ai.llm_exception import LLMException
 from ctools.ai.mcp.mcp_client import MCPClient, res_has_img, is_image_content, get_tools_prompt, mcp_tool_call
-from ctools.ai.tools.xml_extract import extract_all_xml_blocks
+from ctools.ai.tools.quick_tools import build_message, ROLE
 from ctools.ai.tools.think_process import remove_think_blocks
+from ctools.ai.tools.xml_extract import extract_all_xml_blocks

 log = sys_log.flog

@@ -12,14 +14,6 @@ continue_prompt_default = """
 3.如果你认为所有数据都处理完毕, 请输出标记:{end_flag}.
 """

-class ROLE:
-  ASSISTANT = "assistant"
-  USER = "user"
-  SYSTEM = "system"
-
-def get_message_json(role_type: ROLE, content):
-  return {"role": role_type, "content": content}
-
 class ChatSession:

   def __init__(self, prompts: str, llm_client: LLMClient, max_tools_call: int = 10, mcp_clients: list[MCPClient] = None,
@@ -44,7 +38,7 @@ class ChatSession:

     self.current_message = {}
     self.full_messages = []
-    self.is_success = True
+    self.res_code = 200

   async def init_prompts(self, user_system_prompt):
     if self.mcp_clients:
@@ -54,13 +48,13 @@ class ChatSession:
        mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, self.prompts)
      else:
        mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, "")
-      self.full_messages.append(get_message_json(ROLE.SYSTEM, mcp_tools_prompt))
+      self.full_messages.append(build_message(ROLE.SYSTEM, mcp_tools_prompt))
      #log.info(mcp_tools_prompt)
    else:
      if user_system_prompt:
-        self.full_messages.append(get_message_json(ROLE.SYSTEM, user_system_prompt))
+        self.full_messages.append(build_message(ROLE.SYSTEM, user_system_prompt))
      elif self.prompts:
-        self.full_messages.append(get_message_json(ROLE.SYSTEM, self.prompts))
+        self.full_messages.append(build_message(ROLE.SYSTEM, self.prompts))

  async def chat(self, user_input: [str], get_call_id: callable(str) = lambda: "None", get_event_msg_func: callable(str) = None, get_full_msg_func: callable(str) = None):
    """
@@ -74,6 +68,7 @@ class ChatSession:
    -------
    """
    # 获取 prompt
+    if type(user_input) == dict: user_input = [user_input]
    user_system_prompt = user_input[0]["content"] if user_input[0]["role"] == "system" else ""
    user_input = user_input[1:] if user_input[0]["role"] == "system" else user_input
    await self.init_prompts(user_system_prompt)
@@ -93,7 +88,7 @@ class ChatSession:
        log.info("\n收到回答: %s", llm_response)
        no_think_llm_response = remove_think_blocks(llm_response)  # 处理掉 think, 然后再判断 EOF, 避免 think 里出现 EOF
        if self.end_flag in no_think_llm_response:
-          self.full_messages.append(get_message_json(ROLE.ASSISTANT, llm_response.replace(self.end_flag, "")))  # 去掉 EOF
+          self.full_messages.append(build_message(ROLE.ASSISTANT, llm_response.replace(self.end_flag, "")))  # 去掉 EOF
          current_process_index = 999
          final_resp = True
          await self.process_full_message(final_resp, get_call_id, get_full_msg_func)
@@ -105,31 +100,36 @@ class ChatSession:
            log.info("\nMCP调用结果: %s", tool_call_result)
            current_process_index += 1
            if tool_call_result == xml_block:
-              self.full_messages.append(get_message_json(ROLE.USER, "工具调用出现错误, 请重试."))
+              self.full_messages.append(build_message(ROLE.USER, "工具调用出现错误, 请重试."))
            elif current_process_index == max_tools_call - 1:
              await self.add_tool_call_res_2_message(last_user_input, tool_call_result)
-              self.full_messages.append(get_message_json(ROLE.USER, "调用次数已达上限, 请直接回答."))  # 不能调换顺序
+              self.full_messages.append(build_message(ROLE.USER, "调用次数已达上限, 请直接回答."))  # 不能调换顺序
            else:
-              self.full_messages.append(get_message_json(ROLE.ASSISTANT, llm_response))  # 不能调换顺序
+              self.full_messages.append(build_message(ROLE.ASSISTANT, llm_response))  # 不能调换顺序
              await self.add_tool_call_res_2_message(last_user_input, tool_call_result)
            await self.process_tool_call_message(get_call_id, get_event_msg_func, tool_call_result)
            final_resp = False
        else:
-          self.full_messages.append(get_message_json(ROLE.ASSISTANT, llm_response))
-          if self.auto_complete: self.full_messages.append(get_message_json(ROLE.USER, self.continue_prompt))
+          self.full_messages.append(build_message(ROLE.ASSISTANT, llm_response))
+          if self.auto_complete: self.full_messages.append(build_message(ROLE.USER, self.continue_prompt))
          final_resp = True
          await self.process_full_message(final_resp, get_call_id, get_full_msg_func)
    except Exception as e:
      log.exception(e)
-      error_msg = '系统出现错误, 请重试~ {}'.format(e)
-      self.full_messages.append(get_message_json(ROLE.ASSISTANT, error_msg))
-      await self.process_error_message(error_msg, get_call_id, get_event_msg_func, get_full_msg_func)
+      if isinstance(e, LLMException):
+        error_code = e.code
+        error_msg = '系统出现错误, 请重试: {}-{}'.format(e.code, e.message)
+      else:
+        error_code = 500
+        error_msg = '系统出现错误, 请重试: {}'.format(e)
+      self.full_messages.append(build_message(ROLE.ASSISTANT, error_msg))
+      await self.process_error_message(error_code, error_msg, get_call_id, get_event_msg_func, get_full_msg_func)
    finally:
      return self.current_message

-  async def process_error_message(self, error_msg, get_call_id, get_event_msg_func, get_full_msg_func):
+  async def process_error_message(self, code, error_msg, get_call_id, get_event_msg_func, get_full_msg_func):
    # 最终结果通知前端+实时通知都要有
-    self.is_success = False
+    self.res_code = code
    self.current_message = error_msg
    if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.ASSISTANT, self.current_message)
    if get_full_msg_func: await get_full_msg_func(get_call_id(), True, self.full_messages)
@@ -145,6 +145,7 @@ class ChatSession:
    if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.USER, self.current_message)

  async def process_full_message(self, final_resp, get_call_id, get_full_msg_func):
+    self.current_message = self.full_messages[-1]["content"]
    if get_full_msg_func: await get_full_msg_func(get_call_id(), final_resp, self.full_messages)

  async def add_tool_call_res_2_message(self, last_user_input, tool_call_result: dict):
@@ -153,7 +154,7 @@ class ChatSession:
    image_content = []
    for rep in response:
      if not is_image_content(rep):
-        self.full_messages.append(get_message_json(ROLE.USER, str(rep)))
+        self.full_messages.append(build_message(ROLE.USER, str(rep)))
      else:
        image_content.append({
          "type": "image_url",
@@ -166,5 +167,5 @@ class ChatSession:
          "type": "text",
          "text": last_user_input
        })
-    self.full_messages.append(get_message_json(ROLE.USER, image_content))
+    self.full_messages.append(build_message(ROLE.USER, image_content))

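ChatSession now reports failures through a numeric res_code (200 on success, the LLMException code or 500 otherwise) instead of the old is_success flag, builds messages via quick_tools.build_message, and accepts a single message dict as well as a list. A hedged sketch of the new calling convention (the llm instance and prompt text are illustrative):

from ctools.ai.llm_chat import ChatSession
from ctools.ai.tools.quick_tools import build_message, ROLE

async def run(llm):  # llm: an LLMClient instance wired elsewhere
  session = ChatSession("你是一个数据处理助手", llm_client=llm)
  result = await session.chat(build_message(ROLE.USER, "开始处理"))  # a bare dict is now accepted
  if session.res_code != 200:  # replaces the old boolean is_success
    print("chat failed:", session.res_code, result)
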
ctools/ai/llm_client.py CHANGED
@@ -1,10 +1,10 @@
 import logging
 import os
-import sys

 import httpx

 from ctools import sys_log, cjson
+from ctools.ai.llm_exception import LLMException

 logging.getLogger("httpcore").setLevel(logging.WARNING)
 logging.getLogger("httpx").setLevel(logging.WARNING)
@@ -17,25 +17,31 @@ def process_SSE(line):
   if line.startswith("data: "):
     data = line[6:]
     if data == "[DONE]":
-      return "DONE"
+      return "[DONE]"
     return data

+shared_client = httpx.AsyncClient(
+  base_url=os.getenv("LLM_BASE_URL", "https://api.siliconflow.cn/v1/"),
+  timeout=httpx.Timeout(connect=10.0, read=60.0, write=10.0, pool=5.0),
+  limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+)
+
 class LLMClient:
   """Manages communication with the LLM provider."""

-  def __init__(self, api_key: str=os.getenv("LLM_API_KEY"),
-               llm_url: str="https://api.siliconflow.cn/v1/",
-               model_name: str="Qwen/Qwen3-235B-A22B",
-               temperature: float=1, stream: bool=True,
-               thinking: bool=True,
-               thinking_budget: int=4096,
-               max_tokens: int=8192,
-               top_p: float=0.5,
-               top_k: int=50,
-               frequency_penalty: float=0.5
-               ) -> None:
+  def __init__(self,
+               api_key: str = os.getenv("LLM_API_KEY"),
+               model_name: str = "Qwen/Qwen3-235B-A22B",
+               temperature: float = 1,
+               stream: bool = False,
+               thinking: bool = True,
+               thinking_budget: int = 4096,
+               max_tokens: int = 4096,
+               top_p: float = 0.5,
+               top_k: int = 50,
+               frequency_penalty: float = 0.5
+               ) -> None:
     self.api_key = api_key
-    self.llm_url = llm_url
     self.model_name = model_name
     self.temperature = temperature
     self.stream = stream
@@ -48,7 +54,6 @@ class LLMClient:

   async def model_completion(self, messages: list[dict[str, str]]):
     self.no_think_compatible(messages)
-    url = self.llm_url
     headers = {
       "Content-Type": "application/json",
       "Authorization": f"Bearer {self.api_key}",
@@ -68,66 +73,82 @@ class LLMClient:
     try:
       req_url = "chat/completions"
       if self.stream:
-        async with httpx.AsyncClient(timeout=None, base_url=url) as client:
-          async with client.stream("POST", req_url, headers=headers, json=payload) as response:
-            response.raise_for_status()
-            # 兼容 DS QWEN 的思维链
-            start_think: bool = False
-            end_think: bool = False
-            start_token: str = "<think>"
-            end_token: str = "</think>"
-            # 兼容 DS QWEN 的思维链
-            async for line in response.aiter_lines():
-              data = process_SSE(line)
-              if not data: continue
-              if data == "DONE":
-                continue
-              choice = cjson.loads(data)["choices"][0]
-              if "message" in choice:
-                content = choice["message"]["content"]
-              else:
-                content = choice["delta"].get("content", "")
-              # 兼容 DS QWEN 的思维链
-              reasoning_content = choice["delta"].get("reasoning_content", "")
-              if not start_think and not content and reasoning_content:
-                content = f"{start_token}{reasoning_content}"
-                start_think = True
-              if not end_think and start_think and not reasoning_content:
-                content = f"{end_token}{content}"
-                end_think = True
-              if not content: content = reasoning_content
-              if not content: continue
-              # 兼容 DS QWEN 的思维链
+        async with shared_client.stream("POST", req_url, headers=headers, json=payload) as response:
+          response.raise_for_status()
+          start_think = False
+          end_think = False
+          async for line in response.aiter_lines():
+            data = process_SSE(line)
+            if not data or data == "[DONE]":
+              continue
+            choice = cjson.loads(data)["choices"][0]
+            if "message" in choice:
+              content = choice["message"]["content"]
+            else:
+              content = choice["delta"].get("content", "")
+            reasoning_content = choice["delta"].get("reasoning_content", "")
+            if not start_think and not content and reasoning_content:
+              content = f"<think>{reasoning_content}"
+              start_think = True
+            if not end_think and start_think and not reasoning_content:
+              content = f"</think>{content}"
+              end_think = True
+            if not content:
+              content = reasoning_content
+            if content:
               yield content
       else:
-        async with httpx.AsyncClient(timeout=None, base_url=url) as client:
-          response = await client.post(req_url, headers=headers, json=payload)
-          response.raise_for_status()
-          content = response.json()["choices"][0]["message"]["content"]
-          yield content
-    except httpx.RequestError as e:
+        response = await shared_client.post(req_url, headers=headers, json=payload)
+        response.raise_for_status()
+        content = response.json()["choices"][0]["message"]["content"]
+        yield content
+    except Exception as e:
       error_message = f"Error getting LLM response: {str(e)}"
       log.error(error_message)
       if isinstance(e, httpx.HTTPStatusError):
-        status_code = e.response.status_code
-        log.error(f"Status code: {status_code}")
+        log.error(f"Status code: {e.response.status_code}")
         log.error(f"Response details: {e.response.text}")
-      yield f"I encountered an error: {error_message}. Please try again or rephrase your request."
+        raise LLMException(e.response.status_code, e.response.text)
+      raise LLMException(code=500, message=error_message)

   def no_think_compatible(self, messages):
-    if not self.thinking and "qwen3" in self.model_name:
+    if not self.thinking and "qwen3" in self.model_name.lower():
       for msg in messages:
-        if (msg.get("role") == "user" or msg.get("role") == "system") and "/no_think" not in msg.get("content", ""):
+        if msg.get("role") in ("user", "system") and "/no_think" not in msg.get("content", ""):
           msg["content"] += " /no_think"

-# if __name__ == '__main__':
-#   from env_config import Configuration
-#
-#   config = Configuration()
-#   # llm = LLMClient(config.get_llm_api_key(), llm_url="http://192.168.3.73:8000/v1/", stream=True, model_name="deepseek-r1:7b", thinking=False, verbose=True)
-#   llm = LLMClient(config.get_llm_api_key(), stream=True, model_name="Qwen/Qwen3-32B", thinking=False, verbose=True)
-#   res = []
-#   for chunk in llm.get_response([{"role": "user", "content": "写一个大概三百字的开心故事"}]):
-#     res.append(chunk)
-#   print("".join(res))
+  async def voice_2_text(self, file: bytes = None, file_path: str = None):
+    try:
+      if file_path:
+        with open(file_path, "rb") as f:
+          file = f.read()
+      req_url = "/audio/transcriptions"
+      headers = {
+        "Authorization": f"Bearer {self.api_key}",
+      }
+      files = {
+        "model": (None, self.model_name),
+        "file": ("audio.wav", file, "audio/wav"),
+      }
+      response = await shared_client.post(req_url, headers=headers, files=files)
+      response.raise_for_status()
+      return response.json()["text"]
+    except Exception as e:
+      error_message = f"Error getting LLM response: {str(e)}"
+      log.error(error_message)
+      if isinstance(e, httpx.HTTPStatusError):
+        log.error(f"Status code: {e.response.status_code}")
+        log.error(f"Response details: {e.response.text}")
+        raise LLMException(e.response.status_code, e.response.text)
+      raise LLMException(code=500, message=error_message)

+# from env_config import Configuration
+# config = Configuration("/Users/haoyang/work/pycharmWorkspace/gomyck-py-plugins/ai/klmy-entry_get/.env")
+#
+# async def run():
+#   llm = LLMClient(config.get_llm_api_key(), model_name="FunAudioLLM/SenseVoiceSmall")
+#   res = await llm.voice_2_text(file_path="/Users/haoyang/Downloads/audio.wav")
+#   print(res)
+#
+# if __name__ == '__main__':
+#   asyncio.run(run())
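
All requests now go through the module-level shared_client, whose base URL is read from the LLM_BASE_URL environment variable (defaulting to the SiliconFlow endpoint), replacing the removed llm_url constructor argument; errors are raised as LLMException instead of being yielded as a chat chunk. A sketch of the new voice_2_text helper, with the model name borrowed from the commented-out example above and the file path illustrative:

import asyncio

from ctools.ai.llm_client import LLMClient

async def transcribe():
  llm = LLMClient(model_name="FunAudioLLM/SenseVoiceSmall")  # api_key defaults to LLM_API_KEY
  print(await llm.voice_2_text(file_path="audio.wav"))       # or pass raw bytes via file=

asyncio.run(transcribe())
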
ctools/ai/llm_exception.py ADDED
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+__author__ = 'haoyang'
+__date__ = '2025/6/9 09:02'
+
+from ctools.web.api_result import R
+
+
+class LLMException(Exception):
+
+  def __init__(self, code, message):
+    super(LLMException, self).__init__(message)
+    self.code = code
+    self.message = message
+
+  def __str__(self):
+    return R.error(resp=R.Code.cus_code(self.code, self.message))
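
Callers that previously watched the stream for an apologetic error string should now catch the exception instead; a minimal sketch, assuming the package's own sys_log logger:

from ctools import sys_log
from ctools.ai.llm_client import LLMClient
from ctools.ai.llm_exception import LLMException

log = sys_log.flog

async def ask(llm: LLMClient, messages: list[dict]) -> str:
  try:
    return "".join([chunk async for chunk in llm.model_completion(messages)])
  except LLMException as e:  # code carries the HTTP status (or 500), message the response body
    log.error("LLM call failed: %s %s", e.code, e.message)
    raise
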
ctools/ai/mcp/mcp_client.py CHANGED
@@ -5,6 +5,7 @@ __date__ = '2025/5/16 16:49'

 import asyncio
 import json
+import os
 import shutil
 from contextlib import AsyncExitStack, asynccontextmanager
 from typing import Any
@@ -157,10 +158,13 @@ class MCPClient:
     if self.config.get('server_type') is None or self.config.get('server_type') == 'stdio':
       command = (shutil.which("npx") if self.config["command"] == "npx" else self.config["command"])
       if command is None: raise ValueError("The command must be a valid string and cannot be None.")
+      env = os.environ.copy()
+      custom_env = self.config.get("env", {})
+      env.update(custom_env)
       server_params = StdioServerParameters(
         command=command,
         args=self.config["args"],
-        env=self.config["env"] if self.config.get("env") else None,
+        env=env,
       )
       stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
       read, write = stdio_transport
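
Custom "env" entries in a stdio server config are now merged over a copy of os.environ instead of replacing the child environment outright, so PATH and similar variables survive. An illustrative config (server package and token are made up):

server_config = {
  "command": "npx",
  "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
  "env": {"MY_TOKEN": "secret"},  # merged on top of the inherited environment
}
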
ctools/ai/tools/quick_tools.py ADDED
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+__author__ = 'haoyang'
+__date__ = '2025/6/9 09:49'
+
+import asyncio
+import json
+import sys
+import uuid
+
+from aiohttp import web
+
+import base64
+import mimetypes
+
+class ROLE:
+  ASSISTANT = "assistant"
+  USER = "user"
+  SYSTEM = "system"
+
+def build_message(role_type: ROLE, content):
+  return {"role": role_type, "content": content}
+
+def build_image_message(content: str, file: bytes=None, file_path: str=None):
+  rep = _get_image_data_and_mime(file, file_path)
+  img_content = [{
+    "type": "image_url",
+    "image_url": {
+      "url": f'data:{rep["mime_type"]};base64,{rep["data"]}'
+    }
+  }, {
+    "type": "text",
+    "text": content
+  }]
+  return build_message(ROLE.USER, img_content)
+
+def build_call_back(debug=False):
+  call_id = uuid.uuid4()
+  queue = asyncio.Queue()
+  async def on_msg(cid, role, msg):
+    if debug: print(msg, file=sys.__stdout__, end='')
+    await queue.put({"id": cid, "role": role, "msg": msg})
+  async def on_final(cid, is_final, msg):
+    if debug: print(cid, is_final, msg, file=sys.__stdout__)
+    if is_final:
+      await queue.put("[DONE]")
+    else:
+      nonlocal call_id
+      call_id = uuid.uuid4()
+  def get_call_id():
+    return call_id.hex
+  async def process_sse_resp(response: web.StreamResponse, e: Exception = None):
+    if e:
+      await response.write(b"data: " + f'{{"code": 500, "error": "{e}"}}'.encode('utf-8') + b"\n\n")
+      await response.write(b"data: [DONE]\n\n")
+      return
+    while True:
+      msg = await queue.get()
+      if msg == "[DONE]":
+        await response.write(b"data: [DONE]\n\n")
+        break
+      await response.write(f"data: {json.dumps(msg)}\n\n".encode("utf-8"))
+  return process_sse_resp, {"get_call_id": get_call_id, "get_event_msg_func": on_msg, "get_full_msg_func": on_final}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+def _get_image_data_and_mime(file: bytes = None, file_path: str = None):
+  if file_path:
+    with open(file_path, "rb") as f:
+      file = f.read()
+  if not file:
+    raise ValueError("file 和 file_path 至少要提供一个")
+  mime_type = "application/octet-stream"
+  if file_path:
+    mime_type_guess, _ = mimetypes.guess_type(file_path)
+    if mime_type_guess:
+      mime_type = mime_type_guess
+  data = base64.b64encode(file).decode("utf-8")
+  return {
+    "mime_type": mime_type,
+    "data": data
+  }
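
build_call_back pairs an SSE writer with the three callbacks ChatSession.chat expects, queueing events until a "[DONE]" sentinel. A hedged sketch of wiring it into an aiohttp handler (the route and session are assumed to exist elsewhere):

import asyncio

from aiohttp import web

from ctools.ai.tools.quick_tools import build_call_back, build_message, ROLE

async def chat_handler(request: web.Request) -> web.StreamResponse:
  resp = web.StreamResponse(headers={"Content-Type": "text/event-stream"})
  await resp.prepare(request)
  sse_writer, callbacks = build_call_back(debug=True)
  # session: a ChatSession wired elsewhere; chat() feeds the queue the writer drains
  task = asyncio.create_task(session.chat(build_message(ROLE.USER, "你好"), **callbacks))
  await sse_writer(resp)  # writes data: frames until "[DONE]"
  await task
  return resp
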
ctools/ai/tools/xml_extract.py CHANGED
@@ -8,6 +8,5 @@ text = """
 """

 results = extract_all_xml_blocks(text)
-print(results)
 for xml_block in results:
   print(xml_block)