gomyck-tools 1.0.0__py3-none-any.whl → 1.4.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. ctools/__init__.py +21 -0
  2. ctools/ai/__init__.py +4 -0
  3. ctools/ai/llm_chat.py +184 -0
  4. ctools/ai/llm_client.py +163 -0
  5. ctools/ai/llm_exception.py +17 -0
  6. ctools/ai/mcp/__init__.py +4 -0
  7. ctools/ai/mcp/mcp_client.py +326 -0
  8. ctools/ai/tools/__init__.py +4 -0
  9. ctools/ai/tools/json_extract.py +202 -0
  10. ctools/ai/tools/quick_tools.py +149 -0
  11. ctools/ai/tools/think_process.py +11 -0
  12. ctools/ai/tools/tool_use_xml_parse.py +40 -0
  13. ctools/ai/tools/xml_extract.py +15 -0
  14. ctools/application.py +50 -47
  15. ctools/aspect.py +65 -0
  16. ctools/auto/__init__.py +4 -0
  17. ctools/{browser_element_tools.py → auto/browser_element.py} +18 -8
  18. ctools/{plan_area_tools.py → auto/plan_area.py} +5 -7
  19. ctools/{pty_tools.py → auto/pty_process.py} +6 -3
  20. ctools/{resource_bundle_tools.py → auto/resource_bundle.py} +4 -4
  21. ctools/{screenshot_tools.py → auto/screenshot.py} +7 -6
  22. ctools/{win_canvas.py → auto/win_canvas.py} +10 -4
  23. ctools/{win_control.py → auto/win_control.py} +14 -5
  24. ctools/call.py +34 -49
  25. ctools/cdate.py +84 -0
  26. ctools/cdebug.py +146 -0
  27. ctools/cid.py +20 -0
  28. ctools/cipher/__init__.py +4 -0
  29. ctools/{aes_tools.py → cipher/aes_util.py} +10 -0
  30. ctools/{b64.py → cipher/b64.py} +2 -0
  31. ctools/cipher/czip.py +133 -0
  32. ctools/cipher/rsa.py +75 -0
  33. ctools/{sign.py → cipher/sign.py} +2 -1
  34. ctools/{sm_tools.py → cipher/sm_util.py} +20 -4
  35. ctools/cjson.py +10 -10
  36. ctools/cron_lite.py +109 -97
  37. ctools/database/__init__.py +4 -0
  38. ctools/{database.py → database/database.py} +93 -26
  39. ctools/dict_wrapper.py +21 -0
  40. ctools/ex.py +4 -0
  41. ctools/geo/__init__.py +4 -0
  42. ctools/geo/coord_trans.py +127 -0
  43. ctools/geo/douglas_rarefy.py +139 -0
  44. ctools/metrics.py +56 -63
  45. ctools/office/__init__.py +4 -0
  46. ctools/office/cword.py +30 -0
  47. ctools/{word_fill.py → office/word_fill.py} +3 -6
  48. ctools/patch.py +88 -0
  49. ctools/{work_path.py → path_info.py} +34 -2
  50. ctools/pkg/__init__.py +4 -0
  51. ctools/pkg/dynamic_imp.py +38 -0
  52. ctools/pools/__init__.py +4 -0
  53. ctools/pools/process_pool.py +41 -0
  54. ctools/{thread_pool.py → pools/thread_pool.py} +13 -4
  55. ctools/similar.py +25 -0
  56. ctools/stream/__init__.py +4 -0
  57. ctools/stream/ckafka.py +164 -0
  58. ctools/stream/credis.py +127 -0
  59. ctools/{mqtt_utils.py → stream/mqtt_utils.py} +20 -12
  60. ctools/sys_info.py +36 -13
  61. ctools/sys_log.py +46 -27
  62. ctools/util/__init__.py +4 -0
  63. ctools/util/cftp.py +76 -0
  64. ctools/util/cklock.py +118 -0
  65. ctools/util/config_util.py +52 -0
  66. ctools/util/env_config.py +63 -0
  67. ctools/{html_soup.py → util/html_soup.py} +0 -7
  68. ctools/{http_utils.py → util/http_util.py} +4 -2
  69. ctools/{images_tools.py → util/image_process.py} +10 -1
  70. ctools/util/jb_cut.py +54 -0
  71. ctools/{id_worker_tools.py → util/snow_id.py} +8 -23
  72. ctools/web/__init__.py +4 -0
  73. ctools/web/aio_web_server.py +186 -0
  74. ctools/web/api_result.py +56 -0
  75. ctools/web/bottle_web_base.py +239 -0
  76. ctools/web/bottle_webserver.py +191 -0
  77. ctools/web/bottle_websocket.py +79 -0
  78. ctools/web/ctoken.py +103 -0
  79. ctools/{download_tools.py → web/download_util.py} +14 -12
  80. ctools/web/params_util.py +46 -0
  81. ctools/{upload_tools.py → web/upload_util.py} +3 -2
  82. gomyck_tools-1.4.7.dist-info/METADATA +70 -0
  83. gomyck_tools-1.4.7.dist-info/RECORD +88 -0
  84. {gomyck_tools-1.0.0.dist-info → gomyck_tools-1.4.7.dist-info}/WHEEL +1 -1
  85. gomyck_tools-1.4.7.dist-info/licenses/LICENSE +13 -0
  86. ctools/bashPath.py +0 -13
  87. ctools/bottle_server.py +0 -49
  88. ctools/console.py +0 -55
  89. ctools/date_utils.py +0 -44
  90. ctools/enums.py +0 -4
  91. ctools/excelOpt.py +0 -36
  92. ctools/imgDialog.py +0 -44
  93. ctools/license.py +0 -37
  94. ctools/log.py +0 -28
  95. ctools/mvc.py +0 -56
  96. ctools/obj.py +0 -20
  97. ctools/pacth.py +0 -73
  98. ctools/ssh.py +0 -9
  99. ctools/strDiff.py +0 -20
  100. ctools/string_tools.py +0 -101
  101. ctools/token_tools.py +0 -13
  102. ctools/wordFill.py +0 -24
  103. gomyck_tools-1.0.0.dist-info/METADATA +0 -20
  104. gomyck_tools-1.0.0.dist-info/RECORD +0 -54
  105. /ctools/{word_fill_entity.py → office/word_fill_entity.py} +0 -0
  106. /ctools/{compile_tools.py → util/compile_util.py} +0 -0
  107. {gomyck_tools-1.0.0.dist-info → gomyck_tools-1.4.7.dist-info}/top_level.txt +0 -0
ctools/__init__.py CHANGED
@@ -0,0 +1,21 @@
+ import asyncio
+ import importlib.util
+
+ banner = """
+
+ ██████╗████████╗ ██████╗ ██████╗ ██╗ ███████╗
+ ██╔════╝╚══██╔══╝██╔═══██╗██╔═══██╗██║ ██╔════╝
+ ██║ ██║ ██║ ██║██║ ██║██║ ███████╗
+ ██║ ██║ ██║ ██║██║ ██║██║ ╚════██║
+ ╚██████╗ ██║ ╚██████╔╝╚██████╔╝███████╗███████║
+ ╚═════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚══════╝ --by gomyck 2025
+ """
+
+ print(banner)
+
+ if importlib.util.find_spec("uvloop"):
+   import uvloop
+   asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+   print("uvloop version:", uvloop.__version__)
+ else:
+   print("uvloop not installed, using default asyncio loop")
ctools/ai/__init__.py ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/5/22 15:56'
ctools/ai/llm_chat.py ADDED
@@ -0,0 +1,184 @@
+ from ctools import sys_log
+ from ctools.ai.llm_client import LLMClient
+ from ctools.ai.llm_exception import LLMException
+ from ctools.ai.mcp.mcp_client import MCPClient, res_has_img, is_image_content, get_tools_prompt, mcp_tool_call
+ from ctools.ai.tools.quick_tools import build_message, ROLE
+ from ctools.ai.tools.think_process import remove_think_blocks
+ from ctools.ai.tools.xml_extract import extract_all_xml_blocks
+
+ log = sys_log.flog
+
+ continue_prompt_default = """
+ 1. Continue processing whatever has not been finished yet and skip everything that is already done.
+ 2. For tool calls, reuse the arguments of the previous tool call and only adjust the offset-related parameters, so processing resumes where it left off.
+ 3. If you believe all data has been processed, output the marker: {end_flag}.
+ """
+
+
+ class ChatSession:
+
+   def __init__(self, prompts: str, llm_client: LLMClient, max_tools_call: int = 10, mcp_clients: list[MCPClient] = None,
+                auto_complete: bool = False, end_flag: str = "EOF", continue_prompt: str = continue_prompt_default) -> None:
+     """
+     Initialize the chat session.
+     :param prompts: system prompt
+     :param llm_client: the LLM client
+     :param max_tools_call: maximum number of tool calls per conversation
+     :param mcp_clients: mcp_clients
+     :param auto_complete: whether to keep prompting until the model signals completion
+     :param end_flag: end-of-processing marker
+     :param continue_prompt: continuation prompt used when auto_complete is enabled
+     """
+     self.mcp_clients: list[MCPClient] = mcp_clients
+     self.llm_client: LLMClient = llm_client
+     self.prompts: str = prompts
+     self.max_tools_call = max_tools_call
+     self.auto_complete = auto_complete
+     self.end_flag = end_flag
+     self.continue_prompt = continue_prompt.format(end_flag=self.end_flag)
+
+     self.current_message = {}
+     self.full_messages = []
+     self.res_code = 200
+
+   async def init_prompts(self, user_system_prompt):
+     if self.mcp_clients:
+       if user_system_prompt:
+         mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, user_system_prompt)
+       elif self.prompts:
+         mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, self.prompts)
+       else:
+         mcp_tools_prompt = await get_tools_prompt(self.mcp_clients, "")
+       self.full_messages.append(build_message(ROLE.SYSTEM, mcp_tools_prompt))
+       # log.info(mcp_tools_prompt)
+     else:
+       if user_system_prompt:
+         self.full_messages.append(build_message(ROLE.SYSTEM, user_system_prompt))
+       elif self.prompts:
+         self.full_messages.append(build_message(ROLE.SYSTEM, self.prompts))
+
+   async def chat(self, user_input: list | dict, get_call_id: callable = lambda: "None", get_event_msg_func: callable = None, get_full_msg_func: callable = None):
+     """
+     Run a conversation.
+     Parameters
+     ----------
+     user_input user input, e.g. [{"role": "user", "content": "hello"}]
+     get_call_id func returning the ID of this conversation
+     get_event_msg_func(get_call_id(), role, msg) receives realtime replies (streaming)
+     get_full_msg_func(get_call_id(), is_final, msg) receives the complete message list
+     -------
+     """
+     # extract the prompt
+     if isinstance(user_input, dict): user_input = [user_input]
+     user_system_prompt = user_input[0]["content"] if user_input[0]["role"] == "system" else ""
+     user_input = user_input[1:] if user_input[0]["role"] == "system" else user_input
+     await self.init_prompts(user_system_prompt)
+     try:
+       self.full_messages.extend(user_input)
+       last_user_input = next((item["content"] for item in reversed(user_input) if item.get("role") == "user"), None)
+       current_process_index = 0
+       max_tools_call = self.max_tools_call
+       final_resp = False
+       while (current_process_index < max_tools_call and not final_resp) or (self.auto_complete and current_process_index < 100):
+         log.info("\nGenerating answer: %s", self.full_messages)
+         res = []
+         async for chunk in self.llm_client.model_completion(self.full_messages):
+           res.append(chunk)
+           await self.process_chunk_message(chunk, get_call_id, get_event_msg_func)
+         llm_response = "".join(res)
+         log.info("\nReceived answer: %s", llm_response)
+         no_think_llm_response = remove_think_blocks(llm_response)  # strip think blocks before checking for EOF, so an EOF inside a think block is ignored
+         if self.end_flag in no_think_llm_response:
+           self.full_messages.append(build_message(ROLE.ASSISTANT, llm_response.replace(self.end_flag, "")))  # drop the EOF marker
+           current_process_index = 999
+           final_resp = True
+           await self.process_full_message(final_resp, get_call_id, get_full_msg_func)
+         else:
+           xml_blocks = extract_all_xml_blocks(llm_response)
+           if xml_blocks:
+             for xml_block in xml_blocks:
+               tool_call_result = await mcp_tool_call(self.mcp_clients, xml_block)
+               log.info("\nMCP call result: %s", tool_call_result)
+               current_process_index += 1
+               if tool_call_result == xml_block:
+                 self.full_messages.append(build_message(ROLE.USER, "The tool call failed, please retry."))
+               elif current_process_index == max_tools_call - 1:
+                 await self.add_tool_call_res_2_message(last_user_input, tool_call_result)
+                 self.full_messages.append(build_message(ROLE.USER, "The tool-call limit has been reached, please answer directly."))  # do not reorder these two lines
+               else:
+                 self.full_messages.append(build_message(ROLE.ASSISTANT, llm_response))  # do not reorder these two lines
+                 await self.add_tool_call_res_2_message(last_user_input, tool_call_result)
+               await self.process_tool_call_message(get_call_id, get_event_msg_func, tool_call_result)
+             # a tool was called, so the conversation is not finished yet; keep going
+             final_resp = False
+           else:
+             self.full_messages.append(build_message(ROLE.ASSISTANT, llm_response))
+             if self.auto_complete: self.full_messages.append(build_message(ROLE.USER, self.continue_prompt))
+             final_resp = True
+             await self.process_full_message(final_resp, get_call_id, get_full_msg_func)
+     except Exception as e:
+       log.exception(e)
+       if isinstance(e, LLMException):
+         error_code = e.code
+         error_msg = 'A system error occurred, please retry: {}-{}'.format(e.code, e.message)
+       else:
+         error_code = 500
+         error_msg = 'A system error occurred, please retry: {}'.format(e)
+       self.full_messages.append(build_message(ROLE.ASSISTANT, error_msg))
+       await self.process_error_message(error_code, error_msg, get_call_id, get_event_msg_func, get_full_msg_func)
+     finally:
+       return self.current_message
+
+   async def process_error_message(self, code, error_msg, get_call_id, get_event_msg_func, get_full_msg_func):
+     # the frontend must get both the final-result callback and the realtime one
+     self.res_code = code
+     self.current_message = error_msg
+     if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.ASSISTANT, self.current_message)
+     if get_full_msg_func: await get_full_msg_func(get_call_id(), True, self.full_messages)
+
+   async def process_chunk_message(self, chunk, get_call_id, get_event_msg_func):
+     # realtime notification to the frontend
+     self.current_message = chunk
+     if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.ASSISTANT, self.current_message)
+
+   async def process_tool_call_message(self, get_call_id, get_event_msg_func, tool_call_result):
+     # realtime notification to the frontend (tool calls trigger one extra notification carrying the tool's result)
+     # image results go out as a user message (it must be user, otherwise the API rejects it); other results used to be system (everything is unified to user for now; we may switch back to system later if needed)
+     self.current_message = tool_call_result["result"] if res_has_img(tool_call_result) else tool_call_result
+     if get_event_msg_func: await get_event_msg_func(get_call_id(), ROLE.USER, self.current_message)
+
+   async def process_full_message(self, final_resp, get_call_id, get_full_msg_func):
+     """
+     Full-message callback.
+     :param final_resp: whether this is the final response
+     :param get_call_id: call ID func
+     :param get_full_msg_func: the callback function
+     """
+     self.current_message = self.full_messages[-1]["content"]
+     if get_full_msg_func: await get_full_msg_func(get_call_id(), final_resp, self.full_messages)
+
+   async def add_tool_call_res_2_message(self, last_user_input, tool_call_result: dict):
+     """
+     Append the tool-call result to the conversation so it can be read from the current chat object.
+     :param last_user_input: the client's most recent input
+     :param tool_call_result: tool call result
+     """
+     if not isinstance(tool_call_result, dict): return
+     response: list = tool_call_result.get("result")
+     image_content = []
+     for rep in response:
+       if not is_image_content(rep):
+         self.full_messages.append(build_message(ROLE.USER, str(rep)))
+       else:
+         image_content.append({
+           "type": "image_url",
+           "image_url": {
+             "url": f'data:{rep["mime_type"]};base64,{rep["data"]}'
+           }
+         })
+     if image_content:
+       image_content.append({
+         "type": "text",
+         "text": last_user_input
+       })
+       self.full_messages.append(build_message(ROLE.USER, image_content))
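ChatSession wires the pieces above together: system-prompt injection via init_prompts, a tool-call loop bounded by max_tools_call, and realtime plus full-message callbacks. A minimal usage sketch under stated assumptions (no MCP clients; key, base URL and model come from the LLM_* environment variables that LLMClient reads; "demo-1" is an invented call ID):

import asyncio

from ctools.ai.llm_chat import ChatSession
from ctools.ai.llm_client import LLMClient

async def main():
  llm = LLMClient()  # falls back to LLM_API_KEY / LLM_BASE_URL / LLM_MODEL_NAME
  session = ChatSession(prompts="You are a helpful assistant.", llm_client=llm)

  async def on_chunk(call_id, role, msg):
    print(msg, end="", flush=True)  # realtime streaming chunks

  reply = await session.chat([{"role": "user", "content": "hello"}],
                             get_call_id=lambda: "demo-1",
                             get_event_msg_func=on_chunk)
  print(reply)  # chat() returns the session's current_message

asyncio.run(main())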
ctools/ai/llm_client.py ADDED
@@ -0,0 +1,163 @@
+ import logging
+ import os
+
+ import httpx
+
+ from ctools import sys_log, cjson, call
+ from ctools.util.env_config import float_env, bool_env, int_env
+ from ctools.ai.llm_exception import LLMException
+
+ logging.getLogger("httpcore").setLevel(logging.WARNING)
+ logging.getLogger("httpx").setLevel(logging.WARNING)
+ logging.getLogger("mcp.client.sse").setLevel(logging.WARNING)
+
+ log = sys_log.flog
+ shared_client = None
+
+ def process_SSE(line):
+   if not line: return None
+   if line.startswith("data: "):
+     data = line[6:]
+     if data == "[DONE]":
+       return "[DONE]"
+     return data
+
+ @call.once
+ def init_shared_client():
+   global shared_client
+   shared_client = httpx.AsyncClient(
+     base_url=os.getenv("LLM_BASE_URL", "https://api.siliconflow.cn/v1/"),
+     timeout=httpx.Timeout(connect=10.0, read=60.0, write=10.0, pool=5.0),
+     limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+   )
+   return shared_client
+
+ class LLMClient:
+   """Manages communication with the LLM provider."""
+
+   def __init__(self,
+                api_key: str = "",
+                model_name: str = "",
+                temperature: float = None,
+                stream: bool = None,
+                thinking: bool = None,
+                thinking_budget: int = None,
+                max_tokens: int = None,
+                top_p: float = None,
+                top_k: int = None,
+                frequency_penalty: float = None
+                ) -> None:
+     self.api_key = api_key or os.getenv("LLM_API_KEY")
+     self.model_name = model_name or os.getenv("LLM_MODEL_NAME", "Qwen/Qwen3-235B-A22B")
+     self.temperature = temperature or float_env("LLM_TEMPERATURE", 0.8)
+     self.stream = stream or bool_env("LLM_STREAM", True)
+     self.thinking = thinking or bool_env("LLM_THINKING", True)
+     self.thinking_budget = thinking_budget or int_env("LLM_THINKING_BUDGET", 4096)
+     self.max_tokens = max_tokens or int_env("LLM_MAX_TOKENS", 4096)
+     self.top_p = top_p or float_env("LLM_TOP_P", 0.5)
+     self.top_k = top_k or int_env("LLM_TOP_K", 50)
+     self.frequency_penalty = frequency_penalty or float_env("LLM_FREQUENCY_PENALTY", 0)
+     init_shared_client()
+
+   async def model_completion(self, messages: list[dict[str, str]]):
+     self.no_think_compatible(messages)
+     headers = {
+       "Content-Type": "application/json",
+       "Authorization": f"Bearer {self.api_key}",
+     }
+     payload = {
+       "messages": messages,
+       "model": self.model_name,
+       "temperature": self.temperature,
+       "max_tokens": self.max_tokens,
+       "top_p": self.top_p,
+       "top_k": self.top_k,
+       "frequency_penalty": self.frequency_penalty,
+       "stream": self.stream,
+       "enable_thinking": self.thinking,
+       "thinking_budget": self.thinking_budget
+     }
+     try:
+       req_url = "chat/completions"
+       if self.stream:
+         async with shared_client.stream("POST", req_url, headers=headers, json=payload) as response:
+           response.raise_for_status()
+           start_think = False
+           end_think = False
+           async for line in response.aiter_lines():
+             data = process_SSE(line)
+             if not data or data == "[DONE]":
+               continue
+             choice = cjson.loads(data)["choices"][0]
+             if "message" in choice:
+               content = choice["message"]["content"]
+             else:
+               content = choice["delta"].get("content", "")
+               reasoning_content = choice["delta"].get("reasoning_content", "")
+               if not start_think and not content and reasoning_content:
+                 content = f"<think>{reasoning_content}"
+                 start_think = True
+               if not end_think and start_think and not reasoning_content:
+                 content = f"</think>{content}"
+                 end_think = True
+               if not content:
+                 content = reasoning_content
+             if content:
+               yield content
+       else:
+         response = await shared_client.post(req_url, headers=headers, json=payload)
+         response.raise_for_status()
+         content = response.json()["choices"][0]["message"]["content"]
+         yield content
+     except Exception as e:
+       error_message = f"Error getting LLM response: {str(e)}"
+       log.error(error_message)
+       if isinstance(e, httpx.ReadTimeout):
+         raise LLMException(code=500, message="LLM request timed out")
+       if isinstance(e, httpx.HTTPStatusError):
+         log.error(f"Status code: {e.response.status_code}")
+         log.error(f"Response details: {e.response.text}")
+         raise LLMException(e.response.status_code, e.response.text)
+       raise LLMException(code=500, message=error_message)
+
+   def no_think_compatible(self, messages):
+     if not self.thinking and "qwen3" in self.model_name.lower():
+       for msg in messages:
+         if msg and msg.get("role") in ("user", "system") and "/no_think" not in msg.get("content", ""):
+           msg["content"] += " /no_think"
+
+   async def voice_2_text(self, file: bytes = None, file_path: str = None):
+     try:
+       if file_path:
+         with open(file_path, "rb") as f:
+           file = f.read()
+       req_url = "/audio/transcriptions"
+       headers = {
+         "Authorization": f"Bearer {self.api_key}",
+       }
+       files = {
+         "model": (None, self.model_name),
+         "file": ("audio.wav", file, "audio/wav"),
+       }
+       response = await shared_client.post(req_url, headers=headers, files=files)
+       response.raise_for_status()
+       return response.json()["text"]
+     except Exception as e:
+       error_message = f"Error getting LLM response: {str(e)}"
+       log.error(error_message)
+       if isinstance(e, httpx.HTTPStatusError):
+         log.error(f"Status code: {e.response.status_code}")
+         log.error(f"Response details: {e.response.text}")
+         raise LLMException(e.response.status_code, e.response.text)
+       raise LLMException(code=500, message=error_message)
+
+ # from env_config import Configuration
+ # config = Configuration("/Users/haoyang/work/pycharmWorkspace/gomyck-py-plugins/ai/klmy-entry_get/.env")
+ #
+ # async def run():
+ #   llm = LLMClient(config.get_llm_api_key(), model_name="FunAudioLLM/SenseVoiceSmall")
+ #   res = await llm.voice_2_text(file_path="/Users/haoyang/Downloads/audio.wav")
+ #   print(res)
+ #
+ # if __name__ == '__main__':
+ #   asyncio.run(run())
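The streaming branch of model_completion re-inlines the provider's separate reasoning_content deltas as <think>...</think> markers, which is why ChatSession strips think blocks before scanning for its end flag. A hedged consumer sketch (placeholder API key; remove_think_blocks is the same helper llm_chat.py imports):

import asyncio

from ctools.ai.llm_client import LLMClient
from ctools.ai.tools.think_process import remove_think_blocks

async def main():
  llm = LLMClient(api_key="sk-...")  # placeholder key
  chunks = []
  async for chunk in llm.model_completion([{"role": "user", "content": "hello"}]):
    chunks.append(chunk)
  full = "".join(chunks)            # may contain <think>...</think> segments
  print(remove_think_blocks(full))  # the visible answer only

asyncio.run(main())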
ctools/ai/llm_exception.py ADDED
@@ -0,0 +1,17 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/6/9 09:02'
+
+ from ctools.web.api_result import R
+
+
+ class LLMException(Exception):
+
+   def __init__(self, code, message):
+     super(LLMException, self).__init__(message)
+     self.code = code
+     self.message = message
+
+   def __str__(self):
+     return R.error(resp=R.Code.cus_code(self.code, self.message))
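One caveat with LLMException.__str__: it returns whatever R.error(...) produces, so printing the exception only works if R.error yields a str. A small catch-site sketch (the 429 code and message are invented for illustration):

from ctools.ai.llm_exception import LLMException

try:
  raise LLMException(429, "rate limited")  # hypothetical upstream failure
except LLMException as e:
  print(e.code, e.message)  # structured fields for programmatic handling
  print(str(e))             # the R.error(...) payload, assuming R.error returns a str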
ctools/ai/mcp/__init__.py ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env python
+ # -*- coding: UTF-8 -*-
+ __author__ = 'haoyang'
+ __date__ = '2025/5/22 15:56'