vectorvein 0.1.9__tar.gz → 0.1.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.9 → vectorvein-0.1.10}/PKG-INFO +1 -1
- {vectorvein-0.1.9 → vectorvein-0.1.10}/pyproject.toml +1 -1
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/minimax_client.py +37 -30
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/test_create_chat_client.py +9 -9
- {vectorvein-0.1.9 → vectorvein-0.1.10}/README.md +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/src/vectorvein/utilities/retry.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/__init__.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/cat.png +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/sample_settings.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/test_format_messages.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/test_image_input_chat_client.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/test_tokens_count.py +0 -0
- {vectorvein-0.1.9 → vectorvein-0.1.10}/tests/test_tool_use_multi_turns.py +0 -0
src/vectorvein/chat_clients/minimax_client.py

@@ -25,7 +25,7 @@ def extract_tool_calls(response):
                     "index": index,
                     "id": tool_call["id"],
                     "function": tool_call["function"],
-                    "type":
+                    "type": "function",
                 }
                 for index, tool_call in enumerate(tool_calls)
             ]
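The only substantive change in the hunk above is the `"type"` field: each extracted tool call now carries the literal value `"function"`, matching the OpenAI-style `tool_calls` schema the rest of the client expects. A minimal sketch of the shape the helper builds, assuming it receives the `tool_calls` list already pulled out of the MiniMax response (the standalone function name and the empty-dict fallback are illustrative, not the library's exact code):

```python
# Illustrative sketch, not the library's exact implementation.
def build_tool_calls_params(tool_calls: list[dict]) -> dict:
    """Assemble OpenAI-style tool_calls entries from a MiniMax response."""
    if not tool_calls:
        return {}  # assumed fallback: nothing to merge into the message
    return {
        "tool_calls": [
            {
                "index": index,
                "id": tool_call["id"],
                "function": tool_call["function"],
                "type": "function",  # the 0.1.10 change: always the literal "function"
            }
            for index, tool_call in enumerate(tool_calls)
        ]
    }
```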
@@ -59,6 +59,7 @@ class MiniMaxChatClient(BaseChatClient):
             endpoint_id,
             **kwargs,
         )
+        self.http_client = httpx.Client()
 
     def create_completion(
         self,
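This hunk adds a persistent `httpx.Client` to the chat client's `__init__`. Unlike a one-shot `httpx.post`, a `Client` keeps a connection pool across calls and provides the `.stream()` context manager used by the streaming path in the next hunk. A rough sketch of the reuse pattern, with a placeholder URL rather than the real MiniMax endpoint (the diff itself does not show where, or whether, the client is closed):

```python
import httpx

# Sketch of reusing one httpx.Client across several requests.
client = httpx.Client()
try:
    for payload in ({"q": "hi"}, {"q": "again"}):
        # Repeated calls reuse pooled connections instead of reconnecting each time.
        resp = client.post("https://api.example.com/v1/chat", json=payload, timeout=60)
        resp.raise_for_status()
finally:
    client.close()
```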
@@ -136,41 +137,47 @@ class MiniMaxChatClient(BaseChatClient):
             **tools_params,
         }
 
-        response = httpx.post(
-            url=self.url,
-            headers=self.headers,
-            json=request_body,
-            timeout=60,
-        )
-
         if self.stream:
 
             def generator():
-                [previous streaming body, old lines 149-170; content not rendered in this diff view]
+                with self.http_client.stream(
+                    "POST",
+                    url=self.url,
+                    headers=self.headers,
+                    json=request_body,
+                    timeout=60,
+                ) as response:
+                    for chunk in response.iter_lines():
+                        if chunk:
+                            chunk_data = json.loads(chunk[6:])
+                            if chunk_data["object"] != "chat.completion.chunk":
+                                continue
+                            tool_calls_params = extract_tool_calls(chunk_data)
+                            has_tool_calls = True if tool_calls_params else False
+                            if has_tool_calls:
+                                yield ChatCompletionDeltaMessage(
+                                    **{
+                                        "content": chunk_data["choices"][0]["delta"].get("content"),
+                                        "role": "assistant",
+                                        **tool_calls_params,
+                                    }
+                                )
+                            else:
+                                yield ChatCompletionDeltaMessage(
+                                    **{
+                                        "content": chunk_data["choices"][0]["delta"]["content"],
+                                        "role": "assistant",
+                                    }
+                                )
 
             return generator()
         else:
+            response = httpx.post(
+                url=self.url,
+                headers=self.headers,
+                json=request_body,
+                timeout=60,
+            )
             result = response.json()
             tool_calls_params = extract_tool_calls(result)
             return ChatCompletionMessage(
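The larger change above moves the HTTP request out of a single up-front `httpx.post`: the streaming branch now goes through `self.http_client.stream(...)` and consumes the response line by line, while the non-streaming branch issues its own `httpx.post`. Each streamed line is an SSE-style record, so `chunk[6:]` drops the leading `data: ` prefix before `json.loads`, and anything that is not a `chat.completion.chunk` object is skipped. A self-contained sketch of that consumption loop with placeholder URL, headers, and body; only the httpx and SSE mechanics mirror the diff:

```python
import json
import httpx

def iter_delta_chunks(url: str, headers: dict, request_body: dict):
    """Yield the `delta` payload of each streamed chat.completion.chunk."""
    with httpx.Client() as client, client.stream(
        "POST", url, headers=headers, json=request_body, timeout=60
    ) as response:
        for line in response.iter_lines():
            if not line:
                continue
            data = json.loads(line[6:])  # strip the "data: " prefix of the SSE line
            if data.get("object") != "chat.completion.chunk":
                continue
            yield data["choices"][0]["delta"]

# Usage (hypothetical endpoint and body):
# for delta in iter_delta_chunks("https://api.example.com/v1/chat",
#                                {"Authorization": "Bearer ..."}, {"stream": True}):
#     print(delta.get("content"), end="", flush=True)
```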
tests/test_create_chat_client.py

@@ -193,8 +193,8 @@ model = "deepseek-chat"
 # backend = BackendType.OpenAI
 # model = "gpt-4o"
 # model = "gpt-35-turbo"
-[old lines 196-197; content not rendered in this diff view]
+backend = BackendType.MiniMax
+model = "abab6.5s-chat"
 # backend = BackendType.Yi
 # model = "yi-large-fc"
 # model = "yi-large-turbo"
@@ -222,13 +222,13 @@ model = "deepseek-chat"
 # model = "mistral-large"
 
 start_time = time.perf_counter()
-test_sync(backend=backend, model=model, stream=False, use_tool=False)
-test_sync(backend=backend, model=model, stream=False, use_tool=True)
-test_sync(backend=backend, model=model, stream=True, use_tool=False)
+# test_sync(backend=backend, model=model, stream=False, use_tool=False)
+# test_sync(backend=backend, model=model, stream=False, use_tool=True)
+# test_sync(backend=backend, model=model, stream=True, use_tool=False)
 test_sync(backend=backend, model=model, stream=True, use_tool=True)
-asyncio.run(test_async(backend=backend, model=model, stream=False, use_tool=False))
-asyncio.run(test_async(backend=backend, model=model, stream=False, use_tool=True))
-asyncio.run(test_async(backend=backend, model=model, stream=True, use_tool=False))
-asyncio.run(test_async(backend=backend, model=model, stream=True, use_tool=True))
+# asyncio.run(test_async(backend=backend, model=model, stream=False, use_tool=False))
+# asyncio.run(test_async(backend=backend, model=model, stream=False, use_tool=True))
+# asyncio.run(test_async(backend=backend, model=model, stream=True, use_tool=False))
+# asyncio.run(test_async(backend=backend, model=model, stream=True, use_tool=True))
 end_time = time.perf_counter()
 print(f"Stream time elapsed: {end_time - start_time} seconds")