vectorvein 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
--- vectorvein/chat_clients/anthropic_client.py
+++ vectorvein/chat_clients/anthropic_client.py
@@ -118,6 +118,7 @@ class AnthropicChatClient(BaseChatClient):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -210,6 +211,7 @@ class AnthropicChatClient(BaseChatClient):
             max_tokens=max_tokens,
             tools=tools_params,
             tool_choice=tool_choice,
+            **kwargs,
         )
 
         if self.stream:
@@ -318,6 +320,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -410,6 +413,7 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
             max_tokens=max_tokens,
             tools=tools_params,
             tool_choice=tool_choice,
+            **kwargs,
         )
 
         if self.stream:
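All four hunks make the same change: create_completion (and its async counterpart) now accepts arbitrary keyword arguments and forwards them verbatim into the underlying Anthropic messages.create call. A minimal caller-side sketch, assuming the constructor and messages parameter look roughly like this (top_k is a real Anthropic sampling parameter, but nothing in the wrapper declares it):

    # Hypothetical usage sketch: provider-specific parameters the wrapper
    # does not name, such as Anthropic's top_k, now ride through **kwargs.
    client = AnthropicChatClient(model="claude-3-5-sonnet-20240620")
    message = client.create_completion(
        messages=[{"role": "user", "content": "Hello"}],
        top_k=40,  # forwarded untouched into messages.create(**kwargs)
    )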
--- vectorvein/chat_clients/gemini_client.py
+++ vectorvein/chat_clients/gemini_client.py
@@ -146,9 +146,9 @@ class GeminiChatClient(BaseChatClient):
         result = {
             "content": "",
             "usage": {
-                "prompt_tokens": response["usageMetadata"]["promptTokenCount"],
-                "completion_tokens": response["usageMetadata"]["candidatesTokenCount"],
-                "total_tokens": response["usageMetadata"]["totalTokenCount"],
+                "prompt_tokens": response.get("usageMetadata", {}).get("promptTokenCount", 0),
+                "completion_tokens": response.get("usageMetadata", {}).get("candidatesTokenCount", 0),
+                "total_tokens": response.get("usageMetadata", {}).get("totalTokenCount", 0),
             },
         }
         tool_calls = []
@@ -309,9 +309,9 @@ class AsyncGeminiChatClient(BaseAsyncChatClient):
         result = {
             "content": "",
             "usage": {
-                "prompt_tokens": response["usageMetadata"]["promptTokenCount"],
-                "completion_tokens": response["usageMetadata"]["candidatesTokenCount"],
-                "total_tokens": response["usageMetadata"]["totalTokenCount"],
+                "prompt_tokens": response.get("usageMetadata", {}).get("promptTokenCount", 0),
+                "completion_tokens": response.get("usageMetadata", {}).get("candidatesTokenCount", 0),
+                "total_tokens": response.get("usageMetadata", {}).get("totalTokenCount", 0),
             },
         }
         tool_calls = []
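Previously the usage block indexed usageMetadata directly, so any Gemini response without that key raised KeyError. The chained dict.get with a {} fallback and 0 defaults degrades to zero token counts instead. The pattern in isolation:

    # Chained .get() tolerates a missing "usageMetadata" block entirely.
    response = {}  # stand-in for a Gemini response lacking usage metadata
    prompt_tokens = response.get("usageMetadata", {}).get("promptTokenCount", 0)
    assert prompt_tokens == 0  # no KeyError, just a zero count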
--- vectorvein/chat_clients/minimax_client.py
+++ vectorvein/chat_clients/minimax_client.py
@@ -25,7 +25,7 @@ def extract_tool_calls(response):
                 "index": index,
                 "id": tool_call["id"],
                 "function": tool_call["function"],
-                "type": tool_call["type"],
+                "type": "function",
             }
             for index, tool_call in enumerate(tool_calls)
         ]
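Hard-coding "type": "function" decouples the normalized tool call from whether the MiniMax response carries a type field at all; "function" is the only tool-call type in the OpenAI-style shape this mirrors. A made-up example of the input and resulting shape:

    # Hypothetical response fragment missing "type"; the extractor now
    # supplies the constant instead of raising KeyError on tool_call["type"].
    tool_call = {"id": "call_1", "function": {"name": "lookup", "arguments": "{}"}}
    normalized = {
        "index": 0,
        "id": tool_call["id"],
        "function": tool_call["function"],
        "type": "function",  # fixed value, no longer read from the response
    }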
@@ -59,6 +59,7 @@ class MiniMaxChatClient(BaseChatClient):
             endpoint_id,
             **kwargs,
         )
+        self.http_client = httpx.Client()
 
     def create_completion(
         self,
@@ -69,6 +70,7 @@ class MiniMaxChatClient(BaseChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str = "auto",
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -134,43 +136,50 @@ class MiniMaxChatClient(BaseChatClient):
             "stream": self.stream,
             "mask_sensitive_info": False,
             **tools_params,
+            **kwargs,
         }
 
-        response = httpx.post(
-            url=self.url,
-            headers=self.headers,
-            json=request_body,
-            timeout=60,
-        )
-
         if self.stream:
 
             def generator():
-                for chunk in response.iter_lines():
-                    if chunk:
-                        chunk_data = json.loads(chunk[6:])
-                        if chunk_data["object"] != "chat.completion.chunk":
-                            continue
-                        tool_calls_params = extract_tool_calls(chunk_data)
-                        has_tool_calls = True if tool_calls_params else False
-                        if has_tool_calls:
-                            yield ChatCompletionDeltaMessage(
-                                **{
-                                    "content": chunk_data["choices"][0]["delta"].get("content"),
-                                    "role": "assistant",
-                                    **tool_calls_params,
-                                }
-                            )
-                        else:
-                            yield ChatCompletionDeltaMessage(
-                                **{
-                                    "content": chunk_data["choices"][0]["delta"]["content"],
-                                    "role": "assistant",
-                                }
-                            )
+                with self.http_client.stream(
+                    "POST",
+                    url=self.url,
+                    headers=self.headers,
+                    json=request_body,
+                    timeout=60,
+                ) as response:
+                    for chunk in response.iter_lines():
+                        if chunk:
+                            chunk_data = json.loads(chunk[6:])
+                            if chunk_data["object"] != "chat.completion.chunk":
+                                continue
+                            tool_calls_params = extract_tool_calls(chunk_data)
+                            has_tool_calls = True if tool_calls_params else False
+                            if has_tool_calls:
+                                yield ChatCompletionDeltaMessage(
+                                    **{
+                                        "content": chunk_data["choices"][0]["delta"].get("content"),
+                                        "role": "assistant",
+                                        **tool_calls_params,
+                                    }
+                                )
+                            else:
+                                yield ChatCompletionDeltaMessage(
+                                    **{
+                                        "content": chunk_data["choices"][0]["delta"]["content"],
+                                        "role": "assistant",
+                                    }
+                                )
 
             return generator()
         else:
+            response = httpx.post(
+                url=self.url,
+                headers=self.headers,
+                json=request_body,
+                timeout=60,
+            )
             result = response.json()
             tool_calls_params = extract_tool_calls(result)
             return ChatCompletionMessage(
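Two behavioral changes here: the request is no longer fired eagerly when create_completion returns (it now happens only once the generator is consumed), and the streaming branch uses the persistent self.http_client with Client.stream, which reads the SSE body incrementally instead of buffering it the way httpx.post does. The non-streaming branch still issues a plain POST. A self-contained sketch of the two httpx calls (URL and payload are placeholders):

    import httpx

    client = httpx.Client()
    url = "https://example.com/v1/chat/completions"  # placeholder endpoint
    body = {"model": "some-model", "stream": True}   # placeholder payload

    # Streaming: lines are consumed as they arrive over the wire, and the
    # `with` block closes the response when iteration finishes.
    with client.stream("POST", url, json=body, timeout=60) as response:
        for line in response.iter_lines():
            if line:
                payload = line[6:]  # strip the SSE "data: " prefix, as chunk[6:] above

    # Non-streaming: httpx.post buffers the whole body before returning.
    result = httpx.post(url, json={**body, "stream": False}, timeout=60).json()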
@@ -221,6 +230,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
         max_tokens: int | None = None,
         tools: list | None = None,
         tool_choice: str = "auto",
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -284,6 +294,7 @@ class AsyncMiniMaxChatClient(BaseAsyncChatClient):
             "stream": self.stream,
             "mask_sensitive_info": False,
             **tools_params,
+            **kwargs,
         }
 
         if self.stream:
--- vectorvein/chat_clients/openai_compatible_client.py
+++ vectorvein/chat_clients/openai_compatible_client.py
@@ -54,6 +54,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -120,6 +121,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             temperature=self.temperature,
             max_tokens=max_tokens,
             **tools_params,
+            **kwargs,
         )
 
         if self.stream:
@@ -207,6 +209,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         max_tokens: int | None = None,
         tools: list | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
+        **kwargs,
     ):
         if model is not None:
             self.model = model
@@ -273,6 +276,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             temperature=self.temperature,
             max_tokens=max_tokens,
             **tools_params,
+            **kwargs,
         )
 
         if self.stream:
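The same **kwargs passthrough as in the Anthropic client, here spliced into what is presumably the OpenAI SDK's chat.completions.create call. A hypothetical call (seed and response_format are standard OpenAI request parameters, not ones the wrapper declares):

    # Sketch only: extra OpenAI parameters flow through **kwargs.
    client = OpenAICompatibleChatClient(model="gpt-4o")
    message = client.create_completion(
        messages=[{"role": "user", "content": "Reply with JSON."}],
        seed=42,
        response_format={"type": "json_object"},
    )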
--- vectorvein/types/defaults.py
+++ vectorvein/types/defaults.py
@@ -213,6 +213,13 @@ ZHIPUAI_MODELS = {
         "response_format_available": False,
         "max_output_tokens": 4095,
     },
+    "glm-4-plus": {
+        "id": "glm-4-plus",
+        "context_length": 128000,
+        "function_call_available": True,
+        "response_format_available": False,
+        "max_output_tokens": 4095,
+    },
     "glm-4-0520": {
         "id": "glm-4-0520",
         "context_length": 128000,
@@ -255,6 +262,13 @@ ZHIPUAI_MODELS = {
         "response_format_available": False,
         "max_output_tokens": 1024,
     },
+    "glm-4v-plus": {
+        "id": "glm-4v-plus",
+        "context_length": 2000,
+        "function_call_available": False,
+        "response_format_available": False,
+        "max_output_tokens": 1024,
+    },
 }
 
 # Mistral models
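These entries presumably drive per-model capability checks and limits elsewhere in the package: glm-4-plus is registered as a 128k-context text model with tool calling, glm-4v-plus as a small-context vision model without it. A sketch of a typical lookup against such a registry (the helper is illustrative, not part of the package):

    def clamp_max_tokens(model_id: str, requested: int) -> int:
        # Illustrative: cap a request at the model's registered output limit.
        return min(requested, ZHIPUAI_MODELS[model_id]["max_output_tokens"])

    clamp_max_tokens("glm-4-plus", 8000)   # -> 4095
    clamp_max_tokens("glm-4v-plus", 8000)  # -> 1024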
--- vectorvein-0.1.9.dist-info/METADATA
+++ vectorvein-0.1.11.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.9
+Version: 0.1.11
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
--- vectorvein-0.1.9.dist-info/RECORD
+++ vectorvein-0.1.11.dist-info/RECORD
@@ -1,26 +1,26 @@
-vectorvein-0.1.9.dist-info/METADATA,sha256=AlikMRU7DLdZ6gZMohsL1X6NiuAWP3jGV0tE-uZkhNo,501
-vectorvein-0.1.9.dist-info/WHEEL,sha256=rSwsxJWe3vzyR5HCwjWXQruDgschpei4h_giTm0dJVE,90
+vectorvein-0.1.11.dist-info/METADATA,sha256=rw0fFY-7V-teJUIt4t4KlvLIZHp4hjgaQBRpQ816eP0,502
+vectorvein-0.1.11.dist-info/WHEEL,sha256=rSwsxJWe3vzyR5HCwjWXQruDgschpei4h_giTm0dJVE,90
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/__init__.py,sha256=5j7W--jr-l2cqDJp38uXYvkydDK0rnzm7MYGSACHKmU,3976
-vectorvein/chat_clients/anthropic_client.py,sha256=JjigSUsIn06ixIEjnOJhVbcMqy2_MAL3iVUlDFAFMW4,20008
+vectorvein/chat_clients/anthropic_client.py,sha256=MHpLywbIIgFsAkj8EiKEoxUgxcbx3njFxLTN2GhEjys,20088
 vectorvein/chat_clients/base_client.py,sha256=wMXpQ1L1KDb2Hg6va3H3GmcVeQB6r6sh7F4IS0DBQWI,4275
 vectorvein/chat_clients/deepseek_client.py,sha256=3qWu01NlJAP2N-Ff62d5-CZXZitlizE1fzb20LNetig,526
-vectorvein/chat_clients/gemini_client.py,sha256=IHcBHTSHkj3f962S5L7Ga-XA-96sq8quIDRZpoqvGss,13653
+vectorvein/chat_clients/gemini_client.py,sha256=jUu-wwpaChP8_lxLdWnYkgVr_aSegCHbmBgnpFztqAw,13743
 vectorvein/chat_clients/groq_client.py,sha256=Uow4pgdmFi93ZQSoOol2-0PhhqkW-S0XuSldvppz5U4,498
 vectorvein/chat_clients/local_client.py,sha256=55nOsxzqUf79q3Y14MKROA71zxhsT7p7FsDZ89rts2M,422
-vectorvein/chat_clients/minimax_client.py,sha256=uomp3DyTBmDXQtCmRiYp1VIIOFoVZ9_oyM3-j4JO7go,13000
+vectorvein/chat_clients/minimax_client.py,sha256=ZO-7WNsvsct3cpuo0Qc6WL3A0kkijn0J8o0ahlxmUyE,13478
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=fvg--wFwnFEEhLGS9_u1XzNhtkkDUf4_rq6zYKwnOuI,13738
+vectorvein/chat_clients/openai_compatible_client.py,sha256=rPxIPj4STm1sy7T6bqyZpO-FPwMyjyQmThyJqd8_jYs,13822
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/utils.py,sha256=mnAew2Ie3nQHdEyDLKuJvXkQ5QdcSAJ6SpYk5JPbR1Q,20888
 vectorvein/chat_clients/yi_client.py,sha256=RNf4CRuPJfixrwLZ3-DEc3t25QDe1mvZeb9sku2f8Bc,484
 vectorvein/chat_clients/zhipuai_client.py,sha256=Ys5DSeLCuedaDXr3PfG1EW2zKXopt-awO2IylWSwY0s,519
 vectorvein/settings/__init__.py,sha256=4mpccT7eZC3yI1vVnVViW4wHBnDEH9D2R5EsIP34VgU,3218
-vectorvein/types/defaults.py,sha256=ANIYL0W0bxl2IBxvtkS_WlS_qMQQwpi5TKRdLxdk47M,13027
+vectorvein/types/defaults.py,sha256=lCzGOLybX8FzHX-Cv32BaQFZ8sHvPhGIIwDD-VksP20,13460
 vectorvein/types/enums.py,sha256=vzOenCnRlFXBwPh-lfFhjGfM-6yfDj7wZColHODqocI,1550
 vectorvein/types/llm_parameters.py,sha256=nBjStC2zndTY__yhD2WFXB09taxEhDLE3OHA6MICfgE,3494
 vectorvein/utilities/media_processing.py,sha256=BujciRmw1GMmc3ELRvafL8STcy6r5b2rVnh27-uA7so,2256
 vectorvein/utilities/retry.py,sha256=9ePuJdeUUGx-qMWfaFxmlOvG_lQPwCQ4UB1z3Edlo34,993
-vectorvein-0.1.9.dist-info/RECORD,,
+vectorvein-0.1.11.dist-info/RECORD,,