vectorvein 0.1.46__tar.gz → 0.1.48__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34):
  1. {vectorvein-0.1.46 → vectorvein-0.1.48}/PKG-INFO +1 -1
  2. {vectorvein-0.1.46 → vectorvein-0.1.48}/pyproject.toml +1 -1
  3. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/base_client.py +1 -0
  4. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/openai_compatible_client.py +15 -10
  5. {vectorvein-0.1.46 → vectorvein-0.1.48}/README.md +0 -0
  6. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/__init__.py +0 -0
  7. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/__init__.py +0 -0
  8. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  9. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  10. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  11. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  12. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/groq_client.py +0 -0
  13. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/local_client.py +0 -0
  14. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  15. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  16. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  17. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/openai_client.py +0 -0
  18. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/py.typed +0 -0
  19. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  20. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  21. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/utils.py +0 -0
  22. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/yi_client.py +0 -0
  23. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  24. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/py.typed +0 -0
  25. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/server/token_server.py +0 -0
  26. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/settings/__init__.py +0 -0
  27. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/settings/py.typed +0 -0
  28. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/types/defaults.py +0 -0
  29. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/types/enums.py +0 -0
  30. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/types/exception.py +0 -0
  31. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/types/llm_parameters.py +0 -0
  32. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/types/py.typed +0 -0
  33. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/utilities/media_processing.py +0 -0
  34. {vectorvein-0.1.46 → vectorvein-0.1.48}/src/vectorvein/utilities/retry.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vectorvein
- Version: 0.1.46
+ Version: 0.1.48
  Summary: Default template for PDM package
  Author-Email: Anderson <andersonby@163.com>
  License: MIT
@@ -17,7 +17,7 @@ description = "Default template for PDM package"
  name = "vectorvein"
  readme = "README.md"
  requires-python = ">=3.10"
- version = "0.1.46"
+ version = "0.1.48"
 
  [project.license]
  text = "MIT"
@@ -148,6 +148,7 @@ class BaseChatClient(ABC):
  tools=tools,
  tool_choice=tool_choice,
  response_format=response_format,
+ stream_options=stream_options,
  **kwargs,
  )
@@ -6,8 +6,6 @@ from functools import cached_property
  from typing import overload, Generator, AsyncGenerator, Any, Literal, Iterable
 
  import httpx
- from openai._streaming import Stream, AsyncStream
- from openai.types.chat import ChatCompletion, ChatCompletionChunk
  from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
 
  from .base_client import BaseChatClient, BaseAsyncChatClient
@@ -202,7 +200,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
  _stream_options_params = {}
 
  if self.stream:
- stream_response: Stream[ChatCompletionChunk] = raw_client.chat.completions.create(
+ stream_response = raw_client.chat.completions.create(
  model=self.model_id,
  messages=messages,
  stream=True,
@@ -225,11 +223,14 @@ class OpenAICompatibleChatClient(BaseChatClient):
  prompt_tokens=chunk.usage.prompt_tokens or 0,
  total_tokens=chunk.usage.total_tokens or 0,
  )
- else:
- usage = None
+
  if len(chunk.choices) == 0:
+ if usage:
+ yield ChatCompletionDeltaMessage(usage=usage)
  continue
  if not chunk.choices[0].delta:
+ if usage:
+ yield ChatCompletionDeltaMessage(usage=usage)
  continue
  if self.model_setting.function_call_available:
  if chunk.choices[0].delta.tool_calls:
@@ -253,7 +254,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
 
  return generator()
  else:
- response: ChatCompletion = raw_client.chat.completions.create(
+ response = raw_client.chat.completions.create(
  model=self.model_id,
  messages=messages,
  stream=False,
@@ -453,7 +454,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  max_tokens = self.model_setting.context_length - token_counts - 64
 
  if self.stream:
- stream_response: AsyncStream[ChatCompletionChunk] = await raw_client.chat.completions.create(
+ stream_response = await raw_client.chat.completions.create(
  model=self.model_id,
  messages=messages,
  stream=self.stream,
@@ -476,12 +477,16 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
  prompt_tokens=chunk.usage.prompt_tokens or 0,
  total_tokens=chunk.usage.total_tokens or 0,
  )
- else:
- usage = None
+
  if len(chunk.choices) == 0:
+ if usage:
+ yield ChatCompletionDeltaMessage(usage=usage)
  continue
  if not chunk.choices[0].delta:
+ if usage:
+ yield ChatCompletionDeltaMessage(usage=usage)
  continue
+
  if self.model_setting.function_call_available:
  if chunk.choices[0].delta.tool_calls:
  for index, tool_call in enumerate(chunk.choices[0].delta.tool_calls):
@@ -504,7 +509,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
 
  return generator()
  else:
- response: ChatCompletion = await raw_client.chat.completions.create(
+ response = await raw_client.chat.completions.create(
  model=self.model_id,
  messages=messages,
  stream=self.stream,
All remaining files listed above are unchanged between versions.