vectorvein 0.1.8__tar.gz → 0.1.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.8 → vectorvein-0.1.10}/PKG-INFO +1 -1
- {vectorvein-0.1.8 → vectorvein-0.1.10}/pyproject.toml +1 -1
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/minimax_client.py +40 -43
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/sample_settings.py +0 -1
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_create_chat_client.py +3 -6
- {vectorvein-0.1.8 → vectorvein-0.1.10}/README.md +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/utilities/retry.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/__init__.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/cat.png +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_format_messages.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_image_input_chat_client.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_tokens_count.py +0 -0
- {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_tool_use_multi_turns.py +0 -0
{vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/minimax_client.py

```diff
@@ -25,7 +25,7 @@ def extract_tool_calls(response):
                 "index": index,
                 "id": tool_call["id"],
                 "function": tool_call["function"],
-                "type":
+                "type": "function",
             }
             for index, tool_call in enumerate(tool_calls)
         ]
```
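Note: the fix above makes `extract_tool_calls` emit OpenAI-style tool-call entries with an explicit `"type": "function"` field. A minimal standalone sketch of that normalization, assuming an invented response payload (the dict below is illustrative, not captured API output):

```python
# Illustrative input only: the nested layout mirrors the
# chunk_data["choices"][0]["delta"] access seen in the diff,
# but every value here is made up.
response = {
    "choices": [
        {
            "delta": {
                "tool_calls": [
                    {"id": "call_0", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}},
                ]
            }
        }
    ]
}

tool_calls = response["choices"][0]["delta"]["tool_calls"]
tool_calls_params = {
    "tool_calls": [
        {
            "index": index,
            "id": tool_call["id"],
            "function": tool_call["function"],
            "type": "function",  # the 0.1.10 fix: this value was left incomplete in 0.1.8
        }
        for index, tool_call in enumerate(tool_calls)
    ]
}
print(tool_calls_params)
```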
```diff
@@ -59,6 +59,7 @@ class MiniMaxChatClient(BaseChatClient):
             endpoint_id,
             **kwargs,
         )
+        self.http_client = httpx.Client()
 
     def create_completion(
         self,
```
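Note: 0.1.10 keeps a single `httpx.Client` on the instance instead of calling module-level `httpx.post` on every request (as 0.1.8 did), so streamed and repeated requests can reuse pooled connections. A minimal sketch of the pattern; the class name and method below are placeholders, not the package's actual API:

```python
import httpx

class PooledClientSketch:
    def __init__(self) -> None:
        # Created once per instance: keeps a connection pool alive
        # across requests instead of reconnecting on every call.
        self.http_client = httpx.Client()

    def post_json(self, url: str, headers: dict, body: dict) -> dict:
        response = self.http_client.post(url, headers=headers, json=body, timeout=60)
        return response.json()

    def close(self) -> None:
        # The pooled sockets live until the client is closed.
        self.http_client.close()
```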
```diff
@@ -93,7 +94,7 @@ class MiniMaxChatClient(BaseChatClient):
             model=self.model_setting.id,
         )
 
-        if tools
+        if tools:
             tools_params = {
                 "tools": [
                     {
```
```diff
@@ -136,27 +137,24 @@ class MiniMaxChatClient(BaseChatClient):
             **tools_params,
         }
 
-        response = httpx.post(
-            url=self.url,
-            headers=self.headers,
-            json=request_body,
-            timeout=60,
-        )
-
         if self.stream:
 
             def generator():
-
-
-
-
-
-
-
+                with self.http_client.stream(
+                    "POST",
+                    url=self.url,
+                    headers=self.headers,
+                    json=request_body,
+                    timeout=60,
+                ) as response:
+                    for chunk in response.iter_lines():
+                        if chunk:
+                            chunk_data = json.loads(chunk[6:])
+                            if chunk_data["object"] != "chat.completion.chunk":
                                 continue
-
-
-
+                            tool_calls_params = extract_tool_calls(chunk_data)
+                            has_tool_calls = True if tool_calls_params else False
+                            if has_tool_calls:
                                 yield ChatCompletionDeltaMessage(
                                     **{
                                         "content": chunk_data["choices"][0]["delta"].get("content"),
```
```diff
@@ -164,18 +162,22 @@ class MiniMaxChatClient(BaseChatClient):
                                         **tool_calls_params,
                                     }
                                 )
-
-
-
-
-
-
-
-                                    }
-                                )
+                            else:
+                                yield ChatCompletionDeltaMessage(
+                                    **{
+                                        "content": chunk_data["choices"][0]["delta"]["content"],
+                                        "role": "assistant",
+                                    }
+                                )
 
             return generator()
         else:
+            response = httpx.post(
+                url=self.url,
+                headers=self.headers,
+                json=request_body,
+                timeout=60,
+            )
             result = response.json()
             tool_calls_params = extract_tool_calls(result)
             return ChatCompletionMessage(
```
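Note: the `chunk[6:]` slice in the streaming loop strips the six-character `data: ` prefix that server-sent-events lines put before each JSON payload, and 0.1.10 now also skips any event whose `object` is not `chat.completion.chunk`. A standalone sketch of that parsing step, using invented sample lines:

```python
import json

# Invented SSE lines in the shape the streaming loop expects.
lines = [
    'data: {"object": "chat.completion.chunk", "choices": [{"delta": {"content": "Hello"}}]}',
    'data: {"object": "chat.completion", "usage": {"total_tokens": 7}}',
]

for line in lines:
    if not line:
        continue  # blank keep-alive lines separate SSE events
    chunk_data = json.loads(line[6:])  # drop the 6-char "data: " prefix
    if chunk_data["object"] != "chat.completion.chunk":
        continue  # skip non-chunk events such as the final usage summary
    print(chunk_data["choices"][0]["delta"].get("content"))
```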
```diff
@@ -250,7 +252,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
             model=self.model_setting.id,
         )
 
-        if tools
+        if tools:
             tools_params = {
                 "tools": [
                     {
```
```diff
@@ -305,24 +307,19 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
                     async for chunk in response.aiter_lines():
                         if chunk:
                             chunk_data = json.loads(chunk[6:])
+                            if chunk_data["object"] != "chat.completion.chunk":
+                                continue
                             tool_calls_params = extract_tool_calls(chunk_data)
                             has_tool_calls = True if tool_calls_params else False
                             if has_tool_calls:
-
-
-
-
-
-
-
-                                        "content": chunk_data["choices"][0]["delta"].get("content"),
-                                        "role": "assistant",
-                                        **tool_calls_params,
-                                    }
-                                )
+                                yield ChatCompletionDeltaMessage(
+                                    **{
+                                        "content": chunk_data["choices"][0]["delta"].get("content"),
+                                        "role": "assistant",
+                                        **tool_calls_params,
+                                    }
+                                )
                             else:
-                                if "usage" in chunk_data:
-                                    continue
                                 yield ChatCompletionDeltaMessage(
                                     **{
                                         "content": chunk_data["choices"][0]["delta"]["content"],
```
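Note: the async client now applies the same early `continue` for non-chunk events before extracting tool calls. A self-contained sketch of the async loop under the same assumptions (caller-supplied `url`, `headers`, and `body`; the real client builds these from its model settings):

```python
import json
import httpx

async def stream_deltas(url: str, headers: dict, body: dict):
    # Async mirror of the sync streaming path: POST the request,
    # then yield one parsed delta per "chat.completion.chunk" line.
    async with httpx.AsyncClient() as client:
        async with client.stream("POST", url, headers=headers, json=body, timeout=60) as response:
            async for chunk in response.aiter_lines():
                if not chunk:
                    continue
                chunk_data = json.loads(chunk[6:])
                if chunk_data["object"] != "chat.completion.chunk":
                    continue
                yield chunk_data["choices"][0]["delta"]
```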
{vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_create_chat_client.py

```diff
@@ -192,12 +192,9 @@ model = "deepseek-chat"
 # model = "moonshot-v1-8k"
 # backend = BackendType.OpenAI
 # model = "gpt-4o"
-# backend = BackendType.MiniMax
-# model = "abab6.5s-chat"
-# backend = BackendType.OpenAI
 # model = "gpt-35-turbo"
-
-
+backend = BackendType.MiniMax
+model = "abab6.5s-chat"
 # backend = BackendType.Yi
 # model = "yi-large-fc"
 # model = "yi-large-turbo"
```
```diff
@@ -226,7 +223,7 @@ model = "deepseek-chat"
 
 start_time = time.perf_counter()
 # test_sync(backend=backend, model=model, stream=False, use_tool=False)
-test_sync(backend=backend, model=model, stream=False, use_tool=True)
+# test_sync(backend=backend, model=model, stream=False, use_tool=True)
 # test_sync(backend=backend, model=model, stream=True, use_tool=False)
 test_sync(backend=backend, model=model, stream=True, use_tool=True)
 # asyncio.run(test_async(backend=backend, model=model, stream=False, use_tool=False))
```