vectorvein 0.1.47__tar.gz → 0.1.48__tar.gz
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
- {vectorvein-0.1.47 → vectorvein-0.1.48}/PKG-INFO +1 -1
- {vectorvein-0.1.47 → vectorvein-0.1.48}/pyproject.toml +1 -1
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/openai_compatible_client.py +11 -4
- {vectorvein-0.1.47 → vectorvein-0.1.48}/README.md +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/utilities/retry.py +0 -0
{vectorvein-0.1.47 → vectorvein-0.1.48}/src/vectorvein/chat_clients/openai_compatible_client.py
RENAMED
@@ -223,11 +223,14 @@ class OpenAICompatibleChatClient(BaseChatClient):
                         prompt_tokens=chunk.usage.prompt_tokens or 0,
                         total_tokens=chunk.usage.total_tokens or 0,
                     )
-
-                usage = None
+
                 if len(chunk.choices) == 0:
+                    if usage:
+                        yield ChatCompletionDeltaMessage(usage=usage)
                     continue
                 if not chunk.choices[0].delta:
+                    if usage:
+                        yield ChatCompletionDeltaMessage(usage=usage)
                     continue
                 if self.model_setting.function_call_available:
                     if chunk.choices[0].delta.tool_calls:
@@ -474,12 +477,16 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                         prompt_tokens=chunk.usage.prompt_tokens or 0,
                         total_tokens=chunk.usage.total_tokens or 0,
                     )
-
-                usage = None
+
                 if len(chunk.choices) == 0:
+                    if usage:
+                        yield ChatCompletionDeltaMessage(usage=usage)
                     continue
                 if not chunk.choices[0].delta:
+                    if usage:
+                        yield ChatCompletionDeltaMessage(usage=usage)
                     continue
+
                 if self.model_setting.function_call_available:
                     if chunk.choices[0].delta.tool_calls:
                         for index, tool_call in enumerate(chunk.choices[0].delta.tool_calls):
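Both hunks make the same fix, once in the synchronous OpenAICompatibleChatClient and once in AsyncOpenAICompatibleChatClient: when a streamed chunk has no choices or an empty delta, the loop previously hit `continue` and any usage captured from that chunk was dropped; it now yields a usage-only ChatCompletionDeltaMessage first. OpenAI-compatible APIs typically send token usage on a final chunk whose `choices` list is empty, so for callers this means token counts can arrive on a delta that carries no content. A minimal consumer sketch under that assumption follows; `drain_stream` is a hypothetical helper, not part of the package, and it relies only on the delta messages exposing optional `content` and `usage` attributes:

    def drain_stream(stream):
        """Collect streamed text plus the final usage from an iterator of
        ChatCompletionDeltaMessage objects (e.g. the generator patched above)."""
        text_parts = []
        usage = None
        for delta in stream:
            # Content-bearing deltas contribute text; usage-only deltas (the case
            # added in 0.1.48) may carry no content at all, only token counts.
            if getattr(delta, "content", None):
                text_parts.append(delta.content)
            if getattr(delta, "usage", None):
                usage = delta.usage  # keep the most recent usage snapshot
        return "".join(text_parts), usage

Yielding the usage before each `continue` keeps the early-exit structure of the loop intact while ensuring the terminal usage-only chunk is no longer silently discarded.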