vectorvein 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/__init__.py +5 -3
- vectorvein/chat_clients/openai_compatible_client.py +9 -8
- vectorvein/chat_clients/utils.py +50 -222
- {vectorvein-0.1.1.dist-info → vectorvein-0.1.3.dist-info}/METADATA +2 -2
- {vectorvein-0.1.1.dist-info → vectorvein-0.1.3.dist-info}/RECORD +6 -6
- {vectorvein-0.1.1.dist-info → vectorvein-0.1.3.dist-info}/WHEEL +0 -0
vectorvein/chat_clients/__init__.py CHANGED
@@ -17,7 +17,7 @@ from .deepseek_client import DeepSeekChatClient, AsyncDeepSeekChatClient
 from ..types import defaults as defs
 from ..types.enums import BackendType, ContextLengthControlType
 from .anthropic_client import AnthropicChatClient, AsyncAnthropicChatClient
-from .utils import format_messages
+from .utils import format_messages, get_token_counts, ToolCallContentProcessor
 
 
 BackendMap = {
@@ -103,8 +103,10 @@ def create_async_chat_client(
 
 
 __all__ = [
+    "BackendType",
+    "format_messages",
+    "get_token_counts",
     "create_chat_client",
     "create_async_chat_client",
-    "format_messages",
-    "BackendType",
+    "ToolCallContentProcessor",
 ]
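Net effect of the two hunks above: get_token_counts and the new ToolCallContentProcessor join the re-exports of vectorvein.chat_clients alongside the existing names. A minimal sketch of what 0.1.3 now allows (hypothetical consumer code, assuming the wheel is installed):

    # Names taken from the updated __all__ above.
    from vectorvein.chat_clients import (
        BackendType,
        ToolCallContentProcessor,
        format_messages,
        get_token_counts,
    )

    print(get_token_counts("Hello, world!"))  # an int, counted with the default tiktoken encoding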
vectorvein/chat_clients/openai_compatible_client.py CHANGED
@@ -11,9 +11,8 @@ from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
 
 from .base_client import BaseChatClient, BaseAsyncChatClient
 from .utils import (
-    tool_use_re,
     cutoff_messages,
-    extract_tool_calls,
+    ToolCallContentProcessor,
     generate_tool_use_system_prompt,
 )
 from ..settings import settings
@@ -128,7 +127,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
                 message = chunk.choices[0].delta.model_dump()
                 full_content += message["content"] if message["content"] else ""
                 if tools:
-                    tool_call_data = extract_tool_calls(result["content"])
+                    tool_call_data = ToolCallContentProcessor(result["content"]).tool_calls
                     if tool_call_data:
                         message["tool_calls"] = tool_call_data["tool_calls"]
                     if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
@@ -151,10 +150,11 @@ class OpenAICompatibleChatClient(BaseChatClient):
                     tool_call.model_dump() for tool_call in response.choices[0].message.tool_calls
                 ]
             else:
-                tool_call_data = extract_tool_calls(result["content"])
+                tool_call_content_processor = ToolCallContentProcessor(result["content"])
+                tool_call_data = tool_call_content_processor.tool_calls
                 if tool_call_data:
                     result["tool_calls"] = tool_call_data["tool_calls"]
-                    result["content"] = tool_use_re.sub("", result["content"]).strip()
+                    result["content"] = tool_call_content_processor.non_tool_content
         return result
 
 
@@ -265,7 +265,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 message = chunk.choices[0].delta.model_dump()
                 full_content += message["content"] if message["content"] else ""
                 if tools:
-                    tool_call_data = extract_tool_calls(result["content"])
+                    tool_call_data = ToolCallContentProcessor(result["content"]).tool_calls
                     if tool_call_data:
                         message["tool_calls"] = tool_call_data["tool_calls"]
                     if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
@@ -288,8 +288,9 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                     tool_call.model_dump() for tool_call in response.choices[0].message.tool_calls
                 ]
             else:
-                tool_call_data = extract_tool_calls(result["content"])
+                tool_call_content_processor = ToolCallContentProcessor(result["content"])
+                tool_call_data = tool_call_content_processor.tool_calls
                 if tool_call_data:
                     result["tool_calls"] = tool_call_data["tool_calls"]
-                    result["content"] = tool_use_re.sub("", result["content"]).strip()
+                    result["content"] = tool_call_content_processor.non_tool_content
         return result
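The four hunks above apply one refactor to the sync and async clients alike: the module-level tool_use_re regex and the extract_tool_calls helper give way to a single ToolCallContentProcessor instance, so the tool-call JSON is parsed once and the cleaned content comes from the same object. The streaming guard on full_content is unchanged context; it exists because the <|▶|> opener arrives token by token, so a chunk may end on any partial prefix of it. A small illustration (the snippet derives the prefixes rather than hard-coding them):

    # Every proper prefix of the tool-call opener "<|▶|>"; a streaming delta may
    # end on any of these, so the client holds the content back until more
    # tokens arrive.
    opener = "<|▶|>"
    print([opener[:i] for i in range(1, len(opener))])
    # ['<', '<|', '<|▶', '<|▶|']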
vectorvein/chat_clients/utils.py CHANGED
@@ -12,7 +12,49 @@ from ..utilities.media_processing import ImageProcessor
 chatgpt_encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
 gpt_4o_encoding = tiktoken.encoding_for_model("gpt-4o")
 
-tool_use_re = re.compile(r"<\|▶\|>(.*?)<\|◀\|>", re.DOTALL)
+
+class ToolCallContentProcessor:
+    tool_use_re = re.compile(r"<\|▶\|>(.*?)<\|◀\|>", re.DOTALL)
+
+    def __init__(self, content: str):
+        self.content = content
+
+    @property
+    def non_tool_content(self):
+        return re.sub(self.tool_use_re, "", self.content).strip()
+
+    @property
+    def tool_calls(self):
+        if "<|▶|>" not in self.content or "<|◀|>" not in self.content:
+            return {}
+        tool_calls_matches = re.findall(self.tool_use_re, self.content)
+        if tool_calls_matches:
+            tool_call_data = {}
+            for match in tool_calls_matches:
+                try:
+                    tool_call_data = json.loads(match)
+                except json.JSONDecodeError:
+                    print(f"Failed to parse tool call data:\nContent: {self.content}\nMatch: {match}")
+
+            if not tool_call_data:
+                return {}
+
+            arguments = json.dumps(tool_call_data["arguments"], ensure_ascii=False)
+            return {
+                "tool_calls": [
+                    {
+                        "index": 0,
+                        "id": "fc1",
+                        "function": {
+                            "arguments": arguments,
+                            "name": tool_call_data["name"],
+                        },
+                        "type": "function",
+                    }
+                ]
+            }
+        else:
+            return {}
 
 
 def get_assistant_role_key(backend: BackendType) -> str:
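The new class bundles the old module-level pieces behind two properties: tool_calls parses a JSON payload found between the <|▶|>/<|◀|> markers into an OpenAI-style tool_calls structure, and non_tool_content strips those delimited spans from the surrounding text. A self-contained sketch (the payload shape with "name" and "arguments" keys is what the json.loads / tool_call_data["name"] accesses assume):

    content = 'Let me compute that. <|▶|>{"name": "calculator", "arguments": {"first_operand": 2, "second_operand": 3, "operator": "*"}}<|◀|>'
    processor = ToolCallContentProcessor(content)
    print(processor.tool_calls["tool_calls"][0]["function"]["name"])  # calculator
    print(processor.non_tool_content)  # Let me compute that.

Worth noting: when several delimited payloads are present, the loop overwrites tool_call_data on each iteration, so only the last parseable match survives, and the hard-coded "index": 0 / "id": "fc1" likewise reflect a single-call design.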
@@ -48,7 +90,7 @@ def convert_type(value, value_type):
         return value  # Return the original value if the type is unknown
 
 
-def get_token_counts(text: str, model: str = "") -> int:
+def get_token_counts(text: str | dict, model: str = "") -> int:
     if not isinstance(text, str):
         text = str(text)
     if model == "gpt-3.5-turbo":
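The widened annotation is a type-level catch-up: the body (unchanged context above) already coerces any non-string with str() before encoding, so dict inputs, for example a whole message object, now pass type checking as well. A hedged sketch; exact counts depend on the tiktoken vocabulary:

    print(get_token_counts("hello world"))                      # counts tokens of the string
    print(get_token_counts({"role": "user", "content": "hi"}))  # counts tokens of str(dict)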
@@ -178,7 +220,7 @@ def format_workflow_messages(message, content, backend):
     formatted_messages = []
 
     # Tool call message
-    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
+    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi):
         tool_call_message = {
             "content": None,
             "role": "assistant",
@@ -235,7 +277,7 @@ def format_workflow_messages(message, content, backend):
         formatted_messages.append(tool_call_message)
 
     # Tool call result message
-    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
+    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi):
         tool_call_result_message = {
             "role": "tool",
             "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"],
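Both membership checks gain BackendType.Yi, so Yi-backed workflows now take the OpenAI-style branch (an assistant message carrying a tool_calls array, followed by a role-"tool" result message) instead of falling through to the generic JSON-in-content fallback. Illustrative shape only; the field values below are hypothetical:

    # The tool-result shape format_workflow_messages now produces for BackendType.Yi.
    tool_call_result_message = {
        "role": "tool",
        "tool_call_id": "call_0",
        "name": "calculator",
        "content": "6",
    }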
@@ -321,12 +363,14 @@ def format_openai_message(message, backend):
 
 
 def format_messages(
-    messages: list, backend: BackendType = BackendType.OpenAI, native_multimodal: bool = False
+    messages: list,
+    backend: BackendType = BackendType.OpenAI,
+    native_multimodal: bool = False,
 ) -> list:
     """Convert serialized VectorVein and OpenAI Message formats into the formats supported by different models
 
     Args:
-        messages (list): VectorVein messages list.
+        messages (list): VectorVein Or OpenAI messages list.
         backend (str, optional): Messages format target backend. Defaults to BackendType.OpenAI.
         native_multimodal (bool, optional): Use native multimodal ability. Defaults to False.
 
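Beyond the one-parameter-per-line reflow, the Args entry now spells out that OpenAI-format message lists are accepted alongside VectorVein's own schema (the docstring summary line already said so), which is what makes the format_messages_v1 variant below redundant. A hedged call sketch; the payloads must match one of the schemas handled by format_text_message / format_workflow_messages:

    formatted = format_messages(
        [{"role": "user", "content": "What is 2 + 3?"}],
        backend=BackendType.OpenAI,
        native_multimodal=False,
    )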
@@ -396,189 +440,6 @@ def format_text_message(content, role, attachments, backend, native_multimodal):
         return {"role": role, "content": content}
 
 
-def format_messages_v1(
-    messages: list, backend: BackendType = BackendType.OpenAI, native_multimodal: bool = False
-) -> list:
-    """Convert the serialized VectorVein Message format into the formats supported by different models
-
-    Args:
-        messages (list): VectorVein messages list.
-        backend (str, optional): Messages format target backend. Defaults to BackendType.OpenAI.
-        native_multimodal (bool, optional): Use native multimodal ability. Defaults to False.
-
-    Returns:
-        list: _description_
-    """
-
-    backend = backend.lower()
-    formatted_messages = []
-    for message in messages:
-        content = message["content"]["text"]
-        if message["content_type"] == "TXT":
-            role = "user" if message["author_type"] == "U" else get_assistant_role_key(backend)
-            if not message.get("attachments"):
-                if backend == BackendType.Gemini:
-                    formatted_message = {"role": role, "parts": [{"text": content}]}
-                else:
-                    formatted_message = {"role": role, "content": content}
-                formatted_messages.append(formatted_message)
-                continue
-
-            images_extensions = ("jpg", "jpeg", "png", "bmp")
-            has_images = any(attachment.lower().endswith(images_extensions) for attachment in message["attachments"])
-
-            content += "\n# Attachments:\n"
-            content += "\n".join([f"- {attachment}" for attachment in message["attachments"]])
-
-            if native_multimodal and has_images:
-                if backend == BackendType.Gemini:
-                    parts = [{"text": content}]
-                    for attachment in message["attachments"]:
-                        if attachment.lower().endswith(images_extensions):
-                            parts.append(format_image_message(image=attachment, backend=backend))
-                    formatted_message = {"role": role, "parts": parts}
-                else:
-                    formatted_message = {
-                        "role": role,
-                        "content": [
-                            {"type": "text", "text": content},
-                            *[
-                                format_image_message(image=attachment, backend=backend)
-                                for attachment in message["attachments"]
-                                if attachment.lower().endswith(images_extensions)
-                            ],
-                        ],
-                    }
-                formatted_messages.append(formatted_message)
-            else:
-                if backend == BackendType.Gemini:
-                    formatted_message = {"role": role, "parts": [{"text": content}]}
-                else:
-                    formatted_message = {"role": role, "content": content}
-                formatted_messages.append(formatted_message)
-        elif message["content_type"] == "WKF" and message["status"] in ("S", "R"):
-            # TODO: only a single tool_call is handled for now
-            if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
-                tool_call_message = {
-                    "content": None,
-                    "role": "assistant",
-                    "tool_calls": [
-                        {
-                            "id": message["metadata"]["selected_workflow"]["tool_call_id"],
-                            "type": "function",
-                            "function": {
-                                "name": message["metadata"]["selected_workflow"]["function_name"],
-                                "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
-                            },
-                        }
-                    ],
-                }
-            elif backend == BackendType.Anthropic:
-                tool_call_message = {
-                    "role": "assistant",
-                    "content": [
-                        {
-                            "type": "tool_use",
-                            "id": message["metadata"]["selected_workflow"]["tool_call_id"],
-                            "name": message["metadata"]["selected_workflow"]["function_name"],
-                            "input": message["metadata"]["selected_workflow"]["params"],
-                        },
-                    ],
-                }
-                if content:
-                    tool_call_message["content"].insert(
-                        0,
-                        {
-                            "type": "text",
-                            "text": content,
-                        },
-                    )
-            elif backend == BackendType.Gemini:
-                tool_call_message = {
-                    "role": "model",
-                    "parts": [
-                        {
-                            "functionCall": {
-                                "name": message["metadata"]["selected_workflow"]["function_name"],
-                                "args": message["metadata"]["selected_workflow"]["params"],
-                            }
-                        },
-                    ],
-                }
-                if content:
-                    tool_call_message["parts"].insert(
-                        0,
-                        {
-                            "text": content,
-                        },
-                    )
-            else:
-                tool_call_message = {
-                    "content": json.dumps(
-                        {
-                            "name": message["metadata"]["selected_workflow"]["function_name"],
-                            "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
-                        },
-                        ensure_ascii=False,
-                    ),
-                    "role": "assistant",
-                }
-            formatted_messages.append(tool_call_message)
-
-            if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral):
-                tool_call_result_message = {
-                    "role": "tool",
-                    "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"],
-                    "name": message["metadata"]["selected_workflow"]["function_name"],
-                    "content": message["metadata"].get("workflow_result", ""),
-                }
-            elif backend == BackendType.Anthropic:
-                tool_call_result_message = {
-                    "role": "user",
-                    "content": [
-                        {
-                            "type": "tool_result",
-                            "tool_use_id": message["metadata"]["selected_workflow"]["tool_call_id"],
-                            "content": message["metadata"].get("workflow_result", ""),
-                        }
-                    ],
-                }
-            elif backend == BackendType.Gemini:
-                tool_call_result_message = {
-                    "role": "function",
-                    "parts": [
-                        {
-                            "functionResponse": {
-                                "name": message["metadata"]["selected_workflow"]["function_name"],
-                                "response": {
-                                    "name": message["metadata"]["selected_workflow"]["function_name"],
-                                    "content": message["metadata"].get("workflow_result", ""),
-                                },
-                            }
-                        }
-                    ],
-                }
-            else:
-                tool_call_result_message = {
-                    "role": "user",
-                    "content": json.dumps(
-                        {
-                            "function": message["metadata"]["selected_workflow"]["function_name"],
-                            "result": message["metadata"].get("workflow_result", ""),
-                        },
-                        ensure_ascii=False,
-                    ),
-                }
-            formatted_messages.append(tool_call_result_message)
-
-            if content and backend not in (BackendType.Mistral, BackendType.Anthropic, BackendType.Gemini):
-                formatted_messages.append({"role": "assistant", "content": content})
-        else:
-            continue
-
-    return formatted_messages
-
-
 def generate_tool_use_system_prompt(tools: list, format_type: str = "json") -> str:
     if format_type == "json":
         return (
@@ -600,36 +461,3 @@ def generate_tool_use_system_prompt(tools: list, format_type: str = "json") -> str:
         "## Output format\n<|▶|><invoke><tool_name>[function name:str]</tool_name><parameters><parameter_1_name>[parameter_1_value]</parameter_1_name><parameter_2_name>[parameter_2_value]</parameter_2_name>...</parameters></invoke><|◀|>\n\n"
         "## Example output\n<|▶|><invoke><tool_name>calculator</tool_name><parameters><first_operand>1984135</first_operand><second_operand>9343116</second_operand><operator>*</operator></parameters></invoke><|◀|>"
     )
-
-
-def extract_tool_calls(content: str) -> dict:
-    if "<|▶|>" not in content or "<|◀|>" not in content:
-        return {}
-    tool_calls_matches = tool_use_re.findall(content)
-    if tool_calls_matches:
-        tool_call_data = {}
-        for match in tool_calls_matches:
-            try:
-                tool_call_data = json.loads(match)
-            except json.JSONDecodeError:
-                print(f"Failed to parse tool call data:\nContent: {content}\nMatch: {match}")
-
-        if not tool_call_data:
-            return {}
-
-        arguments = json.dumps(tool_call_data["arguments"], ensure_ascii=False)
-        return {
-            "tool_calls": [
-                {
-                    "index": 0,
-                    "id": "fc1",
-                    "function": {
-                        "arguments": arguments,
-                        "name": tool_call_data["name"],
-                    },
-                    "type": "function",
-                }
-            ]
-        }
-    else:
-        return {}
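This removal closes the loop on the refactor: extract_tool_calls is the same body that reappears above as ToolCallContentProcessor.tool_calls, with the free-standing tool_use_re becoming a class attribute, so no call sites are left behind.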
{vectorvein-0.1.1.dist-info → vectorvein-0.1.3.dist-info}/METADATA CHANGED
@@ -1,10 +1,10 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.1
+Version: 0.1.3
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
-Requires-Python: >=3.
+Requires-Python: >=3.10
 Requires-Dist: openai>=1.37.1
 Requires-Dist: tiktoken>=0.7.0
 Requires-Dist: httpx>=0.27.0
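A plausible driver for the stricter floor: the str | dict annotation added to get_token_counts above is PEP 604 union syntax, which is evaluated when the function is defined and raises a TypeError on Python 3.9 and earlier, so the wheel genuinely cannot import on anything below 3.10.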
{vectorvein-0.1.1.dist-info → vectorvein-0.1.3.dist-info}/RECORD CHANGED
@@ -1,7 +1,7 @@
-vectorvein-0.1.1.dist-info/METADATA,sha256=
-vectorvein-0.1.1.dist-info/WHEEL,sha256=
+vectorvein-0.1.3.dist-info/METADATA,sha256=C0hbnXUDDO_cTFk7pWq3BBEV3bYZVB96KO1ZvQYn0ac,423
+vectorvein-0.1.3.dist-info/WHEEL,sha256=rSwsxJWe3vzyR5HCwjWXQruDgschpei4h_giTm0dJVE,90
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vectorvein/chat_clients/__init__.py,sha256=
+vectorvein/chat_clients/__init__.py,sha256=kjXCFj4tGvD-qpiurFdc6gJWJSaqClr08iWYY3GVdJQ,3974
 vectorvein/chat_clients/anthropic_client.py,sha256=sH6tZHYeZ4WncHtALvbx7axmY8YFOn8JDJKgTgIpqbw,18025
 vectorvein/chat_clients/base_client.py,sha256=za03oViP1hfAE9NAtuAMsms7Chp8-Nm0s8nR1rX9OGo,2705
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
@@ -12,9 +12,9 @@ vectorvein/chat_clients/minimax_client.py,sha256=x6PCSV8uYz_khfAL87q0IrbFEh6hWq5
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=
+vectorvein/chat_clients/openai_compatible_client.py,sha256=wtFstRAbVuhxHLxpihaiAoJbDd5-h35yVnhurCMYZRM,11726
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
-vectorvein/chat_clients/utils.py,sha256=
+vectorvein/chat_clients/utils.py,sha256=DGIh9RqRq61krLX4bLVlqUg7H7MmCyCzBdysRXvNcHY,17634
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
 vectorvein/settings/__init__.py,sha256=1__ILDpBh25S43d32knqLBe05tF4AQwT0ZALdBHw1OY,3396
@@ -22,4 +22,4 @@ vectorvein/types/defaults.py,sha256=Mg-Mj3_eBzKZn1N8x1V2GqyaYgLD13i-NdSYdQC28X4,
 vectorvein/types/enums.py,sha256=vzOenCnRlFXBwPh-lfFhjGfM-6yfDj7wZColHODqocI,1550
 vectorvein/types/llm_parameters.py,sha256=88CwRQ3oCY35nPlCCcLk9T3jofKqT876CexXiB7ExQ8,3101
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
-vectorvein-0.1.1.dist-info/RECORD,,
+vectorvein-0.1.3.dist-info/RECORD,,
{vectorvein-0.1.1.dist-info → vectorvein-0.1.3.dist-info}/WHEEL: file without changes (only its dist-info directory name differs).