vectorvein 0.1.4.tar.gz → 0.1.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.4 → vectorvein-0.1.5}/PKG-INFO +1 -1
- {vectorvein-0.1.4 → vectorvein-0.1.5}/pyproject.toml +1 -1
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/__init__.py +2 -2
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/openai_compatible_client.py +2 -2
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/utils.py +1 -1
- {vectorvein-0.1.4 → vectorvein-0.1.5}/tests/sample_settings.py +34 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/tests/test_create_chat_client.py +7 -2
- {vectorvein-0.1.4 → vectorvein-0.1.5}/README.md +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/tests/__init__.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/tests/cat.png +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/tests/test_format_messages.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/tests/test_image_input_chat_client.py +0 -0
- {vectorvein-0.1.4 → vectorvein-0.1.5}/tests/test_tool_use_multi_turns.py +0 -0
{vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/__init__.py
RENAMED
@@ -55,7 +55,7 @@ BackendMap = {
 def create_chat_client(
     backend: BackendType,
     model: str | None = None,
-    stream: bool =
+    stream: bool = False,
     temperature: float = 0.7,
     context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
     **kwargs,
@@ -80,7 +80,7 @@ def create_chat_client(
 def create_async_chat_client(
     backend: BackendType,
     model: str | None = None,
-    stream: bool =
+    stream: bool = False,
     temperature: float = 0.7,
     context_length_control: ContextLengthControlType = defs.CONTEXT_LENGTH_CONTROL,
     **kwargs,
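The net effect of the two hunks above is that both factory functions now default `stream` to `False`, so callers get a complete response unless they opt in to streaming. A minimal usage sketch follows; `create_chat_client` and the model name come from this diff, the import paths follow the file layout listed above but are assumptions, and provider credentials are assumed to be configured elsewhere through the package's settings.

```python
# Illustrative sketch only: create_chat_client comes from the diff above;
# the import paths mirror the listed file layout (src/vectorvein/types/enums.py)
# but are not verified, and credential setup is assumed to happen elsewhere.
from vectorvein.chat_clients import create_chat_client
from vectorvein.types.enums import BackendType

# As of 0.1.5 a plain call is non-streaming by default...
client = create_chat_client(backend=BackendType.OpenAI, model="gpt-35-turbo")

# ...and streaming is an explicit opt-in.
streaming_client = create_chat_client(
    backend=BackendType.OpenAI,
    model="gpt-35-turbo",
    stream=True,
    temperature=0.7,
)
```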
{vectorvein-0.1.4 → vectorvein-0.1.5}/src/vectorvein/chat_clients/openai_compatible_client.py
RENAMED
@@ -127,7 +127,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
     message = chunk.choices[0].delta.model_dump()
     full_content += message["content"] if message["content"] else ""
     if tools:
-        tool_call_data = ToolCallContentProcessor(
+        tool_call_data = ToolCallContentProcessor(full_content).tool_calls
         if tool_call_data:
             message["tool_calls"] = tool_call_data["tool_calls"]
     if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
@@ -266,7 +266,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
     message = chunk.choices[0].delta.model_dump()
     full_content += message["content"] if message["content"] else ""
     if tools:
-        tool_call_data = ToolCallContentProcessor(
+        tool_call_data = ToolCallContentProcessor(full_content).tool_calls
         if tool_call_data:
             message["tool_calls"] = tool_call_data["tool_calls"]
     if full_content in ("<", "<|", "<|▶", "<|▶|") or full_content.startswith("<|▶|>"):
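Both hunks change the streaming loop to parse tool calls out of `full_content`, the accumulated text of every delta received so far, rather than a single chunk, so a tool-call block split across chunk boundaries is still recognized once it has fully arrived. Below is a simplified stand-in for that idea; `ToolCallContentProcessor`'s internals are not shown in this diff, so the closing marker, the regex, and the JSON payload shape are all assumptions, with only the opening `<|▶|>` marker taken from the code above.

```python
# Simplified, hypothetical stand-in for ToolCallContentProcessor. The "<|▶|>"
# opening marker appears in the diff; the "<|◀|>" closing marker and the
# JSON payload format are assumptions for illustration only.
import json
import re

TOOL_CALL_PATTERN = re.compile(r"<\|▶\|>(.*?)<\|◀\|>", re.DOTALL)

def extract_tool_calls(full_content: str) -> dict:
    """Parse tool calls out of the accumulated (not per-chunk) content."""
    match = TOOL_CALL_PATTERN.search(full_content)
    if not match:
        return {}
    return {"tool_calls": json.loads(match.group(1))}

# A tool-call block split across two stream chunks:
chunks = ['<|▶|>{"name": "get_w', 'eather", "arguments": {}}<|◀|>']
full_content = ""
for delta in chunks:
    full_content += delta
    # Parsing full_content (the 0.1.5 behavior) finds the call once the
    # second chunk arrives; parsing each delta alone never would.
    tool_call_data = extract_tool_calls(full_content)
    if tool_call_data:
        print(tool_call_data["tool_calls"])  # {'name': 'get_weather', 'arguments': {}}
```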
{vectorvein-0.1.4 → vectorvein-0.1.5}/tests/sample_settings.py
RENAMED
@@ -464,4 +464,38 @@ sample_settings = {
             "endpoints": ["zhipuai-default"],
         },
     },
+    "qwen_models": {
+        "qwen1.5-1.8b-chat": {
+            "id": "Qwen/Qwen1.5-1.8B-Chat",
+            "endpoints": ["together-default"],
+        },
+        "qwen1.5-4b-chat": {
+            "id": "Qwen/Qwen1.5-4B-Chat",
+            "endpoints": ["together-default"],
+        },
+        "qwen1.5-7b-chat": {
+            "id": "Qwen/Qwen1.5-7B-Chat",
+            "endpoints": ["together-default"],
+        },
+        "qwen1.5-14b-chat": {
+            "id": "Qwen/Qwen1.5-14B-Chat",
+            "endpoints": ["together-default"],
+        },
+        "qwen1.5-32b-chat": {
+            "id": "Qwen/Qwen1.5-32B-Chat",
+            "endpoints": ["together-default"],
+        },
+        "qwen1.5-72b-chat": {
+            "id": "Qwen/Qwen1.5-72B-Chat",
+            "endpoints": ["together-default"],
+        },
+        "qwen1.5-110b-chat": {
+            "id": "Qwen/Qwen1.5-110B-Chat",
+            "endpoints": ["together-default"],
+        },
+        "qwen2-72b-instruct": {
+            "id": "Qwen/Qwen2-72B-Instruct",
+            "endpoints": ["together-default"],
+        },
+    },
 }
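The new `qwen_models` block follows the same shape as the existing entries: a short local model name mapping to a provider-side model id, plus the list of endpoints allowed to serve it. A trimmed, hypothetical slice is sketched below; the `together-default` endpoint id and the model mapping come straight from the diff, while the endpoint definition fields are assumed and may not match the package's real settings schema.

```python
# Hypothetical trimmed slice of tests/sample_settings.py. The "qwen_models"
# entry is taken from the diff; the endpoint definition is an assumed
# OpenAI-compatible shape, not the package's verified schema.
sample_settings = {
    "endpoints": [
        {
            "id": "together-default",                   # referenced by the models below
            "api_base": "https://api.together.xyz/v1",  # assumed base URL
            "api_key": "sk-...",                        # placeholder credential
        },
    ],
    "qwen_models": {
        # Short local name -> provider-side model id, restricted to the
        # endpoints that may serve it.
        "qwen2-72b-instruct": {
            "id": "Qwen/Qwen2-72B-Instruct",
            "endpoints": ["together-default"],
        },
    },
}
```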
{vectorvein-0.1.4 → vectorvein-0.1.5}/tests/test_create_chat_client.py
RENAMED
@@ -189,10 +189,15 @@ model = "claude-3-5-sonnet-20240620"
 # model = "gemini-1.5-flash"
 # backend = BackendType.OpenAI
 # model = "gpt-35-turbo"
-backend = BackendType.MiniMax
-model = "abab6.5s-chat"
+# backend = BackendType.MiniMax
+# model = "abab6.5s-chat"
 # backend = BackendType.Yi
 # model = "yi-large-fc"
+# backend = BackendType.Mistral
+# model = "mixtral-8x7b"
+backend = BackendType.Qwen
+model = "qwen2-72b-instruct"
+
 start_time = time.perf_counter()
 # test_sync(backend=backend, model=model, stream=False, use_tool=False)
 # test_sync(backend=backend, model=model, stream=False, use_tool=True)
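The test now exercises the Qwen backend with the `qwen2-72b-instruct` entry added to the sample settings above. For completeness, a hedged async counterpart of that selection, using the `create_async_chat_client` factory from the `__init__.py` diff; what the client is used for after construction is omitted rather than guessed at.

```python
# Hypothetical async variant mirroring the test's new Qwen selection.
# create_async_chat_client is taken from the __init__.py diff; the import
# paths follow the listed file layout but are assumptions.
import asyncio

from vectorvein.chat_clients import create_async_chat_client
from vectorvein.types.enums import BackendType

async def main() -> None:
    client = create_async_chat_client(
        backend=BackendType.Qwen,
        model="qwen2-72b-instruct",  # resolves to Qwen/Qwen2-72B-Instruct via sample_settings
        stream=False,                # explicit here; False is the 0.1.5 default anyway
    )
    print(type(client).__name__)

asyncio.run(main())
```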