vectorvein 0.1.8__tar.gz → 0.1.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {vectorvein-0.1.8 → vectorvein-0.1.10}/PKG-INFO +1 -1
  2. {vectorvein-0.1.8 → vectorvein-0.1.10}/pyproject.toml +1 -1
  3. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/minimax_client.py +40 -43
  4. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/sample_settings.py +0 -1
  5. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_create_chat_client.py +3 -6
  6. {vectorvein-0.1.8 → vectorvein-0.1.10}/README.md +0 -0
  7. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/__init__.py +0 -0
  8. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/__init__.py +0 -0
  9. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  10. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/base_client.py +0 -0
  11. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  12. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  13. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/groq_client.py +0 -0
  14. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/local_client.py +0 -0
  15. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  16. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  17. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/openai_client.py +0 -0
  18. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  19. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  20. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/utils.py +0 -0
  21. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/yi_client.py +0 -0
  22. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  23. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/settings/__init__.py +0 -0
  24. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/types/defaults.py +0 -0
  25. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/types/enums.py +0 -0
  26. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/types/llm_parameters.py +0 -0
  27. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/utilities/media_processing.py +0 -0
  28. {vectorvein-0.1.8 → vectorvein-0.1.10}/src/vectorvein/utilities/retry.py +0 -0
  29. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/__init__.py +0 -0
  30. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/cat.png +0 -0
  31. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_format_messages.py +0 -0
  32. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_image_input_chat_client.py +0 -0
  33. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_tokens_count.py +0 -0
  34. {vectorvein-0.1.8 → vectorvein-0.1.10}/tests/test_tool_use_multi_turns.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vectorvein
3
- Version: 0.1.8
3
+ Version: 0.1.10
4
4
  Summary: Default template for PDM package
5
5
  Author-Email: Anderson <andersonby@163.com>
6
6
  License: MIT
@@ -16,7 +16,7 @@ description = "Default template for PDM package"
16
16
  name = "vectorvein"
17
17
  readme = "README.md"
18
18
  requires-python = ">=3.10"
19
- version = "0.1.8"
19
+ version = "0.1.10"
20
20
 
21
21
  [project.license]
22
22
  text = "MIT"
@@ -25,7 +25,7 @@ def extract_tool_calls(response):
25
25
  "index": index,
26
26
  "id": tool_call["id"],
27
27
  "function": tool_call["function"],
28
- "type": tool_call["type"],
28
+ "type": "function",
29
29
  }
30
30
  for index, tool_call in enumerate(tool_calls)
31
31
  ]
@@ -59,6 +59,7 @@ class MiniMaxChatClient(BaseChatClient):
59
59
  endpoint_id,
60
60
  **kwargs,
61
61
  )
62
+ self.http_client = httpx.Client()
62
63
 
63
64
  def create_completion(
64
65
  self,
@@ -93,7 +94,7 @@ class MiniMaxChatClient(BaseChatClient):
93
94
  model=self.model_setting.id,
94
95
  )
95
96
 
96
- if tools is not None:
97
+ if tools:
97
98
  tools_params = {
98
99
  "tools": [
99
100
  {
@@ -136,27 +137,24 @@ class MiniMaxChatClient(BaseChatClient):
136
137
  **tools_params,
137
138
  }
138
139
 
139
- response = httpx.post(
140
- url=self.url,
141
- headers=self.headers,
142
- json=request_body,
143
- timeout=60,
144
- )
145
-
146
140
  if self.stream:
147
141
 
148
142
  def generator():
149
- for chunk in response.iter_lines():
150
- if chunk:
151
- chunk_data = json.loads(chunk[6:])
152
- tool_calls_params = extract_tool_calls(chunk_data)
153
- has_tool_calls = True if tool_calls_params else False
154
- if has_tool_calls:
155
- if "usage" not in chunk_data:
143
+ with self.http_client.stream(
144
+ "POST",
145
+ url=self.url,
146
+ headers=self.headers,
147
+ json=request_body,
148
+ timeout=60,
149
+ ) as response:
150
+ for chunk in response.iter_lines():
151
+ if chunk:
152
+ chunk_data = json.loads(chunk[6:])
153
+ if chunk_data["object"] != "chat.completion.chunk":
156
154
  continue
157
- else:
158
- if chunk_data["object"] != "chat.completion.chunk":
159
- continue
155
+ tool_calls_params = extract_tool_calls(chunk_data)
156
+ has_tool_calls = True if tool_calls_params else False
157
+ if has_tool_calls:
160
158
  yield ChatCompletionDeltaMessage(
161
159
  **{
162
160
  "content": chunk_data["choices"][0]["delta"].get("content"),
@@ -164,18 +162,22 @@ class MiniMaxChatClient(BaseChatClient):
164
162
  **tool_calls_params,
165
163
  }
166
164
  )
167
- else:
168
- if "usage" in chunk_data:
169
- continue
170
- yield ChatCompletionDeltaMessage(
171
- **{
172
- "content": chunk_data["choices"][0]["delta"]["content"],
173
- "role": "assistant",
174
- }
175
- )
165
+ else:
166
+ yield ChatCompletionDeltaMessage(
167
+ **{
168
+ "content": chunk_data["choices"][0]["delta"]["content"],
169
+ "role": "assistant",
170
+ }
171
+ )
176
172
 
177
173
  return generator()
178
174
  else:
175
+ response = httpx.post(
176
+ url=self.url,
177
+ headers=self.headers,
178
+ json=request_body,
179
+ timeout=60,
180
+ )
179
181
  result = response.json()
180
182
  tool_calls_params = extract_tool_calls(result)
181
183
  return ChatCompletionMessage(
@@ -250,7 +252,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
250
252
  model=self.model_setting.id,
251
253
  )
252
254
 
253
- if tools is not None:
255
+ if tools:
254
256
  tools_params = {
255
257
  "tools": [
256
258
  {
@@ -305,24 +307,19 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
305
307
  async for chunk in response.aiter_lines():
306
308
  if chunk:
307
309
  chunk_data = json.loads(chunk[6:])
310
+ if chunk_data["object"] != "chat.completion.chunk":
311
+ continue
308
312
  tool_calls_params = extract_tool_calls(chunk_data)
309
313
  has_tool_calls = True if tool_calls_params else False
310
314
  if has_tool_calls:
311
- if "usage" not in chunk_data:
312
- continue
313
- else:
314
- if chunk_data["object"] != "chat.completion.chunk":
315
- continue
316
- yield ChatCompletionDeltaMessage(
317
- **{
318
- "content": chunk_data["choices"][0]["delta"].get("content"),
319
- "role": "assistant",
320
- **tool_calls_params,
321
- }
322
- )
315
+ yield ChatCompletionDeltaMessage(
316
+ **{
317
+ "content": chunk_data["choices"][0]["delta"].get("content"),
318
+ "role": "assistant",
319
+ **tool_calls_params,
320
+ }
321
+ )
323
322
  else:
324
- if "usage" in chunk_data:
325
- continue
326
323
  yield ChatCompletionDeltaMessage(
327
324
  **{
328
325
  "content": chunk_data["choices"][0]["delta"]["content"],
@@ -425,7 +425,6 @@ sample_settings = {
425
425
  "id": "gpt-35-turbo",
426
426
  "endpoints": [
427
427
  "azure-openai-vectorvein-east-us",
428
- "azure-openai-vectorvein-east-us-2",
429
428
  "azure-openai-vectorvein-au-east",
430
429
  ],
431
430
  },
@@ -192,12 +192,9 @@ model = "deepseek-chat"
192
192
  # model = "moonshot-v1-8k"
193
193
  # backend = BackendType.OpenAI
194
194
  # model = "gpt-4o"
195
- # backend = BackendType.MiniMax
196
- # model = "abab6.5s-chat"
197
- # backend = BackendType.OpenAI
198
195
  # model = "gpt-35-turbo"
199
- # backend = BackendType.MiniMax
200
- # model = "abab6.5s-chat"
196
+ backend = BackendType.MiniMax
197
+ model = "abab6.5s-chat"
201
198
  # backend = BackendType.Yi
202
199
  # model = "yi-large-fc"
203
200
  # model = "yi-large-turbo"
@@ -226,7 +223,7 @@ model = "deepseek-chat"
226
223
 
227
224
  start_time = time.perf_counter()
228
225
  # test_sync(backend=backend, model=model, stream=False, use_tool=False)
229
- test_sync(backend=backend, model=model, stream=False, use_tool=True)
226
+ # test_sync(backend=backend, model=model, stream=False, use_tool=True)
230
227
  # test_sync(backend=backend, model=model, stream=True, use_tool=False)
231
228
  test_sync(backend=backend, model=model, stream=True, use_tool=True)
232
229
  # asyncio.run(test_async(backend=backend, model=model, stream=False, use_tool=False))
File without changes
File without changes