vectorvein-0.1.0-py3-none-any.whl
This diff shows the content of publicly released package versions as published to their public registries; it is provided for informational purposes only.
- vectorvein/__init__.py +0 -0
- vectorvein/chat_clients/__init__.py +110 -0
- vectorvein/chat_clients/anthropic_client.py +450 -0
- vectorvein/chat_clients/base_client.py +91 -0
- vectorvein/chat_clients/deepseek_client.py +15 -0
- vectorvein/chat_clients/gemini_client.py +317 -0
- vectorvein/chat_clients/groq_client.py +15 -0
- vectorvein/chat_clients/local_client.py +14 -0
- vectorvein/chat_clients/minimax_client.py +315 -0
- vectorvein/chat_clients/mistral_client.py +15 -0
- vectorvein/chat_clients/moonshot_client.py +15 -0
- vectorvein/chat_clients/openai_client.py +15 -0
- vectorvein/chat_clients/openai_compatible_client.py +291 -0
- vectorvein/chat_clients/qwen_client.py +15 -0
- vectorvein/chat_clients/utils.py +635 -0
- vectorvein/chat_clients/yi_client.py +15 -0
- vectorvein/chat_clients/zhipuai_client.py +15 -0
- vectorvein/settings/__init__.py +71 -0
- vectorvein/types/defaults.py +396 -0
- vectorvein/types/enums.py +83 -0
- vectorvein/types/llm_parameters.py +69 -0
- vectorvein/utilities/media_processing.py +70 -0
- vectorvein-0.1.0.dist-info/METADATA +16 -0
- vectorvein-0.1.0.dist-info/RECORD +25 -0
- vectorvein-0.1.0.dist-info/WHEEL +4 -0

vectorvein/chat_clients/utils.py
@@ -0,0 +1,635 @@
# @Author: Bi Ying
# @Date: 2024-07-26 14:48:55
import re
import json

import tiktoken

from ..types.enums import BackendType
from ..utilities.media_processing import ImageProcessor


chatgpt_encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
gpt_4o_encoding = tiktoken.encoding_for_model("gpt-4o")

tool_use_re = re.compile(r"<\|▶\|>(.*?)<\|◀\|>", re.DOTALL)


def get_assistant_role_key(backend: BackendType) -> str:
    if backend == BackendType.Gemini:
        return "model"
    else:
        return "assistant"


def get_content_key(backend: BackendType) -> str:
    if backend == BackendType.Gemini:
        return "parts"
    else:
        return "content"


def convert_type(value, value_type):
    if value_type == "string":
        return str(value)
    elif value_type == "number":
        try:
            return float(value)
        except ValueError:
            return value
    elif value_type == "integer":
        try:
            return int(value)
        except ValueError:
            return value
    elif value_type == "boolean":
        return value.lower() in ("true", "1", "t")
    else:
        return value  # Unknown type: return the value unchanged


def get_token_counts(text: str, model: str = "") -> int:
    if not isinstance(text, str):
        text = str(text)
    if model == "gpt-3.5-turbo":
        return len(chatgpt_encoding.encode(text))
    elif model == "gpt-4o":
        return len(gpt_4o_encoding.encode(text))
    elif model.startswith("abab"):
        # abab (MiniMax) models: approximate the token count as characters / 1.33
        return int(len(text) / 1.33)
    else:
        return len(chatgpt_encoding.encode(text))
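
A minimal usage sketch for the two helpers above, assuming the package is importable as vectorvein; the model argument only selects the counting branch (any name starting with "abab" uses the character-based estimate):

from vectorvein.chat_clients.utils import convert_type, get_token_counts

# tiktoken-based counts for OpenAI-style models
print(get_token_counts("Hello, world!", model="gpt-4o"))

# character-based approximation for MiniMax abab models: int(len(text) / 1.33)
print(get_token_counts("Hello, world!", model="abab6.5s-chat"))

# convert_type() coerces tool-call argument strings to the declared JSON type
assert convert_type("42", "integer") == 42
assert convert_type("3.14", "number") == 3.14
assert convert_type("true", "boolean") is True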


def cutoff_messages(
    messages: list,
    max_count: int = 16000,
    backend: BackendType = BackendType.OpenAI,
    model: str = "",
) -> list:
    """
    Truncate a message list so that its total token count stays within max_count.

    If the first message's role is 'system', it is always kept. When the limit is
    exceeded, messages are removed from the front of the list (starting at the
    second element) until the total length is at most max_count. If the last
    message alone exceeds the limit, its content is truncated to max_count.

    Args:
        messages (list): Message list; each message is a dict containing 'role' and 'content'.
        max_count (int, optional): Maximum allowed length. Defaults to 16000.
        backend (BackendType, optional): Target backend, used to pick the content key. Defaults to BackendType.OpenAI.
        model (str, optional): Model name used for token counting. Defaults to "".

    Returns:
        list: The truncated message list.
    """

    if len(messages) == 0:
        return messages

    messages_length = 0
    content_key = get_content_key(backend)

    # Check for and keep the leading system message, if any
    system_message = None
    if messages[0]["role"] == "system":
        system_message = messages[0]
        system_message_length = get_token_counts(system_message[content_key], model)
        if system_message_length > max_count:
            # The system message alone exceeds the limit: truncate it
            system_message[content_key] = system_message[content_key][-max_count:]
            return [system_message]
        else:
            messages_length += system_message_length
            messages = messages[1:]  # Drop the first element and process the rest

    if system_message:
        system_message = [system_message]
    else:
        system_message = []

    for index, message in enumerate(reversed(messages)):
        if not message[content_key]:
            continue
        count = 0
        if isinstance(message[content_key], str):
            contents = [message[content_key]]
        elif isinstance(message[content_key], list):
            contents = message[content_key]
        else:
            contents = [str(message[content_key])]

        for content in contents:
            # TODO: Add non text token counts
            if isinstance(content, dict) and "text" not in content:
                continue
            if isinstance(content, dict):
                content_text = content["text"]
            else:
                content_text = str(content)
            count += get_token_counts(content_text, model)
        messages_length += count
        if messages_length < max_count:
            continue
        if index == 0:
            # A single message already exceeds the limit: truncate it, keeping its tail
            if backend == BackendType.Gemini:
                content = [{"text": message[content_key][-max_count:]}]
            else:
                content = message[content_key][max_count - messages_length :]
            return system_message + [
                {
                    "role": message["role"],
                    content_key: content,
                }
            ]
        return system_message + messages[-index:]
    return system_message + messages
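
An illustrative call, with a deliberately tiny token budget so the trimming is visible; older turns are dropped while the system message and the latest turn survive:

from vectorvein.chat_clients.utils import cutoff_messages
from vectorvein.types.enums import BackendType

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "First question. " * 200},      # long early turn, will be dropped
    {"role": "assistant", "content": "First answer. " * 200},   # dropped as well
    {"role": "user", "content": "Latest question"},
]

trimmed = cutoff_messages(messages, max_count=50, backend=BackendType.OpenAI, model="gpt-4o")
print([m["role"] for m in trimmed])  # ['system', 'user']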


def format_image_message(image: str, backend: BackendType = BackendType.OpenAI) -> dict:
    image_processor = ImageProcessor(image_source=image)
    if backend == BackendType.OpenAI:
        return {
            "type": "image_url",
            "image_url": {"url": image_processor.data_url},
        }
    elif backend == BackendType.Anthropic:
        return {
            "type": "image",
            "source": {
                "type": "base64",
                "media_type": image_processor.mime_type,
                "data": image_processor.base64_image,
            },
        }
    elif backend == BackendType.Gemini:
        return {
            "inline_data": {
                "mime_type": image_processor.mime_type,
                "data": image_processor.base64_image,
            }
        }
    else:
        return {
            "type": "image_url",
            "image_url": {"url": image_processor.data_url},
        }
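
A sketch of the per-backend image payloads, assuming cat.png exists locally (ImageProcessor loads the file and exposes data_url, mime_type and base64_image, as used above):

from vectorvein.chat_clients.utils import format_image_message
from vectorvein.types.enums import BackendType

# OpenAI-compatible backends get an image_url block with a data: URL,
# Anthropic gets a base64 source block, Gemini gets an inline_data part.
openai_block = format_image_message("cat.png", backend=BackendType.OpenAI)
anthropic_block = format_image_message("cat.png", backend=BackendType.Anthropic)
print(openai_block["type"])                     # image_url
print(anthropic_block["source"]["media_type"])  # e.g. image/png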


def format_workflow_messages(message, content, backend):
    formatted_messages = []

    # Tool call message
    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
        tool_call_message = {
            "content": None,
            "role": "assistant",
            "tool_calls": [
                {
                    "id": message["metadata"]["selected_workflow"]["tool_call_id"],
                    "type": "function",
                    "function": {
                        "name": message["metadata"]["selected_workflow"]["function_name"],
                        "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
                    },
                }
            ],
        }
    elif backend == BackendType.Anthropic:
        tool_call_message = {
            "role": "assistant",
            "content": [
                {
                    "type": "tool_use",
                    "id": message["metadata"]["selected_workflow"]["tool_call_id"],
                    "name": message["metadata"]["selected_workflow"]["function_name"],
                    "input": message["metadata"]["selected_workflow"]["params"],
                },
            ],
        }
        if content:
            tool_call_message["content"].insert(0, {"type": "text", "text": content})
    elif backend == BackendType.Gemini:
        tool_call_message = {
            "role": "model",
            "parts": [
                {
                    "functionCall": {
                        "name": message["metadata"]["selected_workflow"]["function_name"],
                        "args": message["metadata"]["selected_workflow"]["params"],
                    }
                },
            ],
        }
        if content:
            tool_call_message["parts"].insert(0, {"text": content})
    else:
        tool_call_message = {
            "content": json.dumps(
                {
                    "name": message["metadata"]["selected_workflow"]["function_name"],
                    "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
                },
                ensure_ascii=False,
            ),
            "role": "assistant",
        }
    formatted_messages.append(tool_call_message)

    # Tool call result message
    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
        tool_call_result_message = {
            "role": "tool",
            "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"],
            "name": message["metadata"]["selected_workflow"]["function_name"],
            "content": message["metadata"].get("workflow_result", ""),
        }
    elif backend == BackendType.Anthropic:
        tool_call_result_message = {
            "role": "user",
            "content": [
                {
                    "type": "tool_result",
                    "tool_use_id": message["metadata"]["selected_workflow"]["tool_call_id"],
                    "content": message["metadata"].get("workflow_result", ""),
                }
            ],
        }
    elif backend == BackendType.Gemini:
        tool_call_result_message = {
            "role": "function",
            "parts": [
                {
                    "functionResponse": {
                        "name": message["metadata"]["selected_workflow"]["function_name"],
                        "response": {
                            "name": message["metadata"]["selected_workflow"]["function_name"],
                            "content": message["metadata"].get("workflow_result", ""),
                        },
                    }
                }
            ],
        }
    else:
        tool_call_result_message = {
            "role": "user",
            "content": json.dumps(
                {
                    "function": message["metadata"]["selected_workflow"]["function_name"],
                    "result": message["metadata"].get("workflow_result", ""),
                },
                ensure_ascii=False,
            ),
        }
    formatted_messages.append(tool_call_result_message)

    if content and backend not in (BackendType.Mistral, BackendType.Anthropic, BackendType.Gemini):
        formatted_messages.append({"role": "assistant", "content": content})

    return formatted_messages


def format_openai_message(message, backend):
    role = message.get("role", "user")
    content = message.get("content", "")

    if backend == BackendType.Gemini:
        if isinstance(content, list):
            parts = []
            for item in content:
                if isinstance(item, str):
                    parts.append({"text": item})
                elif isinstance(item, dict) and "type" in item:
                    if item["type"] == "image":
                        parts.append({"image": item["image"]})
                    elif item["type"] == "text":
                        parts.append({"text": item["text"]})
            return {"role": "user" if role == "user" else "model", "parts": parts}
        else:
            return {"role": "user" if role == "user" else "model", "parts": [{"text": content}]}
    elif backend == BackendType.Anthropic:
        if isinstance(content, list):
            formatted_content = []
            for item in content:
                if isinstance(item, str):
                    formatted_content.append({"type": "text", "text": item})
                elif isinstance(item, dict) and "type" in item:
                    formatted_content.append(item)
            return {"role": role, "content": formatted_content}
        else:
            return {"role": role, "content": content}
    else:
        return message  # For other backends, keep the message as-is


def format_messages(
    messages: list, backend: BackendType = BackendType.OpenAI, native_multimodal: bool = False
) -> list:
    """Convert serialized VectorVein or OpenAI messages into the format expected by each backend.

    Args:
        messages (list): VectorVein or OpenAI messages list.
        backend (BackendType, optional): Messages format target backend. Defaults to BackendType.OpenAI.
        native_multimodal (bool, optional): Use native multimodal ability. Defaults to False.

    Returns:
        list: The converted message list.
    """

    def is_vectorvein_message(message):
        return "content_type" in message

    backend = backend.lower()
    formatted_messages = []

    for message in messages:
        if is_vectorvein_message(message):
            # VectorVein-format message
            content = message["content"]["text"]
            if message["content_type"] == "TXT":
                role = "user" if message["author_type"] == "U" else get_assistant_role_key(backend)
                formatted_message = format_text_message(
                    content, role, message.get("attachments", []), backend, native_multimodal
                )
                formatted_messages.append(formatted_message)
            elif message["content_type"] == "WKF" and message["status"] in ("S", "R"):
                formatted_messages.extend(format_workflow_messages(message, content, backend))
        else:
            # OpenAI-format message
            formatted_message = format_openai_message(message, backend)
            formatted_messages.append(formatted_message)

    return formatted_messages


def format_text_message(content, role, attachments, backend, native_multimodal):
    images_extensions = ("jpg", "jpeg", "png", "bmp")
    has_images = any(attachment.lower().endswith(images_extensions) for attachment in attachments)

    if attachments:
        content += "\n# Attachments:\n"
        content += "\n".join([f"- {attachment}" for attachment in attachments])

    if native_multimodal and has_images:
        if backend == BackendType.Gemini:
            parts = [{"text": content}]
            for attachment in attachments:
                if attachment.lower().endswith(images_extensions):
                    parts.append(format_image_message(image=attachment, backend=backend))
            return {"role": role, "parts": parts}
        else:
            return {
                "role": role,
                "content": [
                    {"type": "text", "text": content},
                    *[
                        format_image_message(image=attachment, backend=backend)
                        for attachment in attachments
                        if attachment.lower().endswith(images_extensions)
                    ],
                ],
            }
    else:
        if backend == BackendType.Gemini:
            return {"role": role, "parts": [{"text": content}]}
        elif backend == BackendType.Anthropic:
            return {"role": role, "content": content}
        else:
            return {"role": role, "content": content}
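
A sketch of format_messages() converting one VectorVein-style record (detected by its content_type key) and one OpenAI-style message for the Anthropic backend, assuming photo.png exists locally so the image attachment can be encoded:

from vectorvein.chat_clients.utils import format_messages
from vectorvein.types.enums import BackendType

messages = [
    # VectorVein-serialized text message with an image attachment
    {
        "content_type": "TXT",
        "author_type": "U",
        "content": {"text": "What is in this picture?"},
        "attachments": ["photo.png"],
    },
    # Plain OpenAI-style message, passed through format_openai_message()
    {"role": "assistant", "content": "Let me take a look."},
]

formatted = format_messages(messages, backend=BackendType.Anthropic, native_multimodal=True)
# The first entry becomes a list of content blocks: a text block (with the
# attachment list appended) plus a base64 image block from format_image_message().
print(formatted[0]["content"][0]["type"])  # text
print(formatted[1])                        # {'role': 'assistant', 'content': 'Let me take a look.'}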


def format_messages_v1(
    messages: list, backend: BackendType = BackendType.OpenAI, native_multimodal: bool = False
) -> list:
    """Convert serialized VectorVein messages into the format expected by each backend.

    Args:
        messages (list): VectorVein messages list.
        backend (BackendType, optional): Messages format target backend. Defaults to BackendType.OpenAI.
        native_multimodal (bool, optional): Use native multimodal ability. Defaults to False.

    Returns:
        list: The converted message list.
    """

    backend = backend.lower()
    formatted_messages = []
    for message in messages:
        content = message["content"]["text"]
        if message["content_type"] == "TXT":
            role = "user" if message["author_type"] == "U" else get_assistant_role_key(backend)
            if not message.get("attachments"):
                if backend == BackendType.Gemini:
                    formatted_message = {"role": role, "parts": [{"text": content}]}
                else:
                    formatted_message = {"role": role, "content": content}
                formatted_messages.append(formatted_message)
                continue

            images_extensions = ("jpg", "jpeg", "png", "bmp")
            has_images = any(attachment.lower().endswith(images_extensions) for attachment in message["attachments"])

            content += "\n# Attachments:\n"
            content += "\n".join([f"- {attachment}" for attachment in message["attachments"]])

            if native_multimodal and has_images:
                if backend == BackendType.Gemini:
                    parts = [{"text": content}]
                    for attachment in message["attachments"]:
                        if attachment.lower().endswith(images_extensions):
                            parts.append(format_image_message(image=attachment, backend=backend))
                    formatted_message = {"role": role, "parts": parts}
                else:
                    formatted_message = {
                        "role": role,
                        "content": [
                            {"type": "text", "text": content},
                            *[
                                format_image_message(image=attachment, backend=backend)
                                for attachment in message["attachments"]
                                if attachment.lower().endswith(images_extensions)
                            ],
                        ],
                    }
                formatted_messages.append(formatted_message)
            else:
                if backend == BackendType.Gemini:
                    formatted_message = {"role": role, "parts": [{"text": content}]}
                else:
                    formatted_message = {"role": role, "content": content}
                formatted_messages.append(formatted_message)
        elif message["content_type"] == "WKF" and message["status"] in ("S", "R"):
            # TODO: Only the single tool_call case is handled for now
            if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
                tool_call_message = {
                    "content": None,
                    "role": "assistant",
                    "tool_calls": [
                        {
                            "id": message["metadata"]["selected_workflow"]["tool_call_id"],
                            "type": "function",
                            "function": {
                                "name": message["metadata"]["selected_workflow"]["function_name"],
                                "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
                            },
                        }
                    ],
                }
            elif backend == BackendType.Anthropic:
                tool_call_message = {
                    "role": "assistant",
                    "content": [
                        {
                            "type": "tool_use",
                            "id": message["metadata"]["selected_workflow"]["tool_call_id"],
                            "name": message["metadata"]["selected_workflow"]["function_name"],
                            "input": message["metadata"]["selected_workflow"]["params"],
                        },
                    ],
                }
                if content:
                    tool_call_message["content"].insert(
                        0,
                        {
                            "type": "text",
                            "text": content,
                        },
                    )
            elif backend == BackendType.Gemini:
                tool_call_message = {
                    "role": "model",
                    "parts": [
                        {
                            "functionCall": {
                                "name": message["metadata"]["selected_workflow"]["function_name"],
                                "args": message["metadata"]["selected_workflow"]["params"],
                            }
                        },
                    ],
                }
                if content:
                    tool_call_message["parts"].insert(
                        0,
                        {
                            "text": content,
                        },
                    )
            else:
                tool_call_message = {
                    "content": json.dumps(
                        {
                            "name": message["metadata"]["selected_workflow"]["function_name"],
                            "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
                        },
                        ensure_ascii=False,
                    ),
                    "role": "assistant",
                }
            formatted_messages.append(tool_call_message)

            if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
                tool_call_result_message = {
                    "role": "tool",
                    "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"],
                    "name": message["metadata"]["selected_workflow"]["function_name"],
                    "content": message["metadata"].get("workflow_result", ""),
                }
            elif backend == BackendType.Anthropic:
                tool_call_result_message = {
                    "role": "user",
                    "content": [
                        {
                            "type": "tool_result",
                            "tool_use_id": message["metadata"]["selected_workflow"]["tool_call_id"],
                            "content": message["metadata"].get("workflow_result", ""),
                        }
                    ],
                }
            elif backend == BackendType.Gemini:
                tool_call_result_message = {
                    "role": "function",
                    "parts": [
                        {
                            "functionResponse": {
                                "name": message["metadata"]["selected_workflow"]["function_name"],
                                "response": {
                                    "name": message["metadata"]["selected_workflow"]["function_name"],
                                    "content": message["metadata"].get("workflow_result", ""),
                                },
                            }
                        }
                    ],
                }
            else:
                tool_call_result_message = {
                    "role": "user",
                    "content": json.dumps(
                        {
                            "function": message["metadata"]["selected_workflow"]["function_name"],
                            "result": message["metadata"].get("workflow_result", ""),
                        },
                        ensure_ascii=False,
                    ),
                }
            formatted_messages.append(tool_call_result_message)

            if content and backend not in (BackendType.Mistral, BackendType.Anthropic, BackendType.Gemini):
                formatted_messages.append({"role": "assistant", "content": content})
        else:
            continue

    return formatted_messages


def generate_tool_use_system_prompt(tools: list, format_type: str = "json") -> str:
    if format_type == "json":
        return (
            "You have access to the following tools. Use them if required and wait for the tool call result. Stop output after calling a tool.\n\n"
            f"# Tools\n{tools}\n\n"
            "# Requirements when using tools\n"
            "Must start with <|▶|> and end with <|◀|>\n"
            "Must be valid JSON format and pay attention to escape characters.\n"
            '## Output format\n<|▶|>{"name": "<function name:str>", "arguments": <arguments:dict>}<|◀|>\n\n'
            '## Example output\n<|▶|>{"name": "get_current_weather", "arguments": {"location": "San Francisco, CA"}}<|◀|>'
        )
    elif format_type == "xml":
        return (
            "You have access to the following tools. Use them if required and wait for the tool call result. Stop output after calling a tool.\n\n"
            f"# Tools\n{tools}\n\n"
            "# Requirements when using tools\n"
            "Must start with <|▶|> and end with <|◀|>\n"
            "Must be valid XML format.\n"
            "## Output format\n<|▶|><invoke><tool_name>[function name:str]</tool_name><parameters><parameter_1_name>[parameter_1_value]</parameter_1_name><parameter_2_name>[parameter_2_value]</parameter_2_name>...</parameters></invoke><|◀|>\n\n"
            "## Example output\n<|▶|><invoke><tool_name>calculator</tool_name><parameters><first_operand>1984135</first_operand><second_operand>9343116</second_operand><operator>*</operator></parameters></invoke><|◀|>"
        )


def extract_tool_calls(content: str) -> dict:
    if "<|▶|>" not in content or "<|◀|>" not in content:
        return {}
    tool_calls_matches = tool_use_re.findall(content)
    if tool_calls_matches:
        tool_call_data = {}
        for match in tool_calls_matches:
            try:
                tool_call_data = json.loads(match)
            except json.JSONDecodeError:
                print(f"Failed to parse tool call data:\nContent: {content}\nMatch: {match}")

        if not tool_call_data:
            return {}

        arguments = json.dumps(tool_call_data["arguments"], ensure_ascii=False)
        return {
            "tool_calls": [
                {
                    "index": 0,
                    "id": "fc1",
                    "function": {
                        "arguments": arguments,
                        "name": tool_call_data["name"],
                    },
                    "type": "function",
                }
            ]
        }
    else:
        return {}
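
These last two helpers implement prompt-based tool calling for backends without native function-call support: generate_tool_use_system_prompt() instructs the model to emit a call wrapped in <|▶|>…<|◀|> markers, and extract_tool_calls() parses that back into an OpenAI-style tool_calls dict. A small round-trip sketch (the weather tool schema is made up for illustration):

import json
from vectorvein.chat_clients.utils import extract_tool_calls, generate_tool_use_system_prompt

tools = [
    {
        "name": "get_current_weather",
        "description": "Get the current weather for a location.",
        "parameters": {"location": {"type": "string"}},
    }
]
system_prompt = generate_tool_use_system_prompt(tools, format_type="json")

# Pretend the model replied with a marker-wrapped call:
model_output = 'Sure.<|▶|>{"name": "get_current_weather", "arguments": {"location": "San Francisco, CA"}}<|◀|>'
parsed = extract_tool_calls(model_output)
print(parsed["tool_calls"][0]["function"]["name"])                   # get_current_weather
print(json.loads(parsed["tool_calls"][0]["function"]["arguments"]))  # {'location': 'San Francisco, CA'}
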

vectorvein/chat_clients/yi_client.py
@@ -0,0 +1,15 @@
# @Author: Bi Ying
# @Date: 2024-07-26 14:48:55
from ..types.enums import BackendType
from ..types.defaults import YI_DEFAULT_MODEL
from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient


class YiChatClient(OpenAICompatibleChatClient):
    DEFAULT_MODEL = YI_DEFAULT_MODEL
    BACKEND_NAME = BackendType.Yi


class AsyncYiChatClient(AsyncOpenAICompatibleChatClient):
    DEFAULT_MODEL = YI_DEFAULT_MODEL
    BACKEND_NAME = BackendType.Yi

vectorvein/chat_clients/zhipuai_client.py
@@ -0,0 +1,15 @@
# @Author: Bi Ying
# @Date: 2024-07-26 14:48:55
from ..types.enums import BackendType
from ..types.defaults import ZHIPUAI_DEFAULT_MODEL
from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient


class ZhiPuAIChatClient(OpenAICompatibleChatClient):
    DEFAULT_MODEL = ZHIPUAI_DEFAULT_MODEL
    BACKEND_NAME = BackendType.ZhiPuAI


class AsyncZhiPuAIChatClient(AsyncOpenAICompatibleChatClient):
    DEFAULT_MODEL = ZHIPUAI_DEFAULT_MODEL
    BACKEND_NAME = BackendType.ZhiPuAI
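
Both clients are thin declarations over OpenAICompatibleChatClient, and the other 15-line client modules in the listing above (deepseek_client.py, groq_client.py, moonshot_client.py, qwen_client.py, ...) presumably follow the same pattern. A hedged sketch of what adding another OpenAI-compatible provider would look like, assuming a matching default-model constant and BackendType member exist:

# Hypothetical example_client.py; EXAMPLE_DEFAULT_MODEL and BackendType.Example
# are illustrative assumptions, not part of the published package.
from ..types.enums import BackendType
from ..types.defaults import EXAMPLE_DEFAULT_MODEL
from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient


class ExampleChatClient(OpenAICompatibleChatClient):
    DEFAULT_MODEL = EXAMPLE_DEFAULT_MODEL
    BACKEND_NAME = BackendType.Example


class AsyncExampleChatClient(AsyncOpenAICompatibleChatClient):
    DEFAULT_MODEL = EXAMPLE_DEFAULT_MODEL
    BACKEND_NAME = BackendType.Example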