vectorvein 0.2.67__tar.gz → 0.2.68__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.2.67 → vectorvein-0.2.68}/PKG-INFO +1 -1
- {vectorvein-0.2.67 → vectorvein-0.2.68}/pyproject.toml +1 -1
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/openai_compatible_client.py +22 -10
- {vectorvein-0.2.67 → vectorvein-0.2.68}/README.md +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/api/__init__.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/api/client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/api/exceptions.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/api/models.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/ernie_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/xai_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/types/__init__.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/types/settings.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/utilities/rate_limiter.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/utilities/retry.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/graph/edge.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/graph/node.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/graph/port.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/graph/workflow.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/__init__.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/llms.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/output.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/tools.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/triggers.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/utils/check.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
- {vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/workflow/utils/layout.py +0 -0
{vectorvein-0.2.67 → vectorvein-0.2.68}/src/vectorvein/chat_clients/openai_compatible_client.py
RENAMED
@@ -117,7 +117,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
117
117
|
model: str | None = None,
|
118
118
|
stream: Literal[False] = False,
|
119
119
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
120
|
-
max_tokens: int | None = None,
|
120
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
121
121
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
122
122
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
123
123
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -158,7 +158,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
158
158
|
model: str | None = None,
|
159
159
|
stream: Literal[True],
|
160
160
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
161
|
-
max_tokens: int | None = None,
|
161
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
162
162
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
163
163
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
164
164
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -199,7 +199,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
199
199
|
model: str | None = None,
|
200
200
|
stream: bool,
|
201
201
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
202
|
-
max_tokens: int | None = None,
|
202
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
203
203
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
204
204
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
205
205
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -239,7 +239,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
239
239
|
model: str | None = None,
|
240
240
|
stream: Literal[False] | Literal[True] = False,
|
241
241
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
242
|
-
max_tokens: int | None = None,
|
242
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
243
243
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
244
244
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
245
245
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -280,6 +280,8 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
280
280
|
self.temperature = temperature
|
281
281
|
if isinstance(top_p, AnthropicNotGiven):
|
282
282
|
top_p = NOT_GIVEN
|
283
|
+
if isinstance(max_tokens, AnthropicNotGiven):
|
284
|
+
max_tokens = NOT_GIVEN
|
283
285
|
|
284
286
|
raw_client = self.raw_client # 调用完 self.raw_client 后,self.model_id 会被赋值
|
285
287
|
self.model_setting = self.backend_settings.models[self.model]
|
@@ -322,7 +324,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
322
324
|
else:
|
323
325
|
tools_params = {}
|
324
326
|
|
325
|
-
if max_tokens is None:
|
327
|
+
if not max_tokens and not max_completion_tokens:
|
326
328
|
max_output_tokens = self.model_setting.max_output_tokens
|
327
329
|
token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model)
|
328
330
|
if max_output_tokens is not None:
|
@@ -331,6 +333,10 @@ class OpenAICompatibleChatClient(BaseChatClient):
|
|
331
333
|
else:
|
332
334
|
max_tokens = self.model_setting.context_length - token_counts - 64
|
333
335
|
|
336
|
+
if "o3-mini" in self.model_id:
|
337
|
+
max_completion_tokens = max_tokens
|
338
|
+
max_tokens = NOT_GIVEN
|
339
|
+
|
334
340
|
self._acquire_rate_limit(self.endpoint, self.model, messages)
|
335
341
|
|
336
342
|
if self.stream:
|
@@ -587,7 +593,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
587
593
|
model: str | None = None,
|
588
594
|
stream: Literal[False] = False,
|
589
595
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
590
|
-
max_tokens: int | None = None,
|
596
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
591
597
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
592
598
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
593
599
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -628,7 +634,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
628
634
|
model: str | None = None,
|
629
635
|
stream: Literal[True],
|
630
636
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
631
|
-
max_tokens: int | None = None,
|
637
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
632
638
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
633
639
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
634
640
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -669,7 +675,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
669
675
|
model: str | None = None,
|
670
676
|
stream: bool,
|
671
677
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
672
|
-
max_tokens: int | None = None,
|
678
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
673
679
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
674
680
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
675
681
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -709,7 +715,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
709
715
|
model: str | None = None,
|
710
716
|
stream: Literal[False] | Literal[True] = False,
|
711
717
|
temperature: float | None | NotGiven = NOT_GIVEN,
|
712
|
-
max_tokens: int | None = None,
|
718
|
+
max_tokens: int | None | NotGiven = NOT_GIVEN,
|
713
719
|
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
|
714
720
|
tool_choice: ToolChoice | NotGiven = NOT_GIVEN,
|
715
721
|
response_format: ResponseFormat | NotGiven = NOT_GIVEN,
|
@@ -750,6 +756,8 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
750
756
|
self.temperature = temperature
|
751
757
|
if isinstance(top_p, AnthropicNotGiven):
|
752
758
|
top_p = NOT_GIVEN
|
759
|
+
if isinstance(max_tokens, AnthropicNotGiven):
|
760
|
+
max_tokens = NOT_GIVEN
|
753
761
|
|
754
762
|
raw_client = self.raw_client # 调用完 self.raw_client 后,self.model_id 会被赋值
|
755
763
|
self.model_setting = self.backend_settings.models[self.model]
|
@@ -792,7 +800,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
792
800
|
else:
|
793
801
|
tools_params = {}
|
794
802
|
|
795
|
-
if max_tokens is None:
|
803
|
+
if not max_tokens and not max_completion_tokens:
|
796
804
|
max_output_tokens = self.model_setting.max_output_tokens
|
797
805
|
token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model)
|
798
806
|
if max_output_tokens is not None:
|
@@ -801,6 +809,10 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
|
|
801
809
|
else:
|
802
810
|
max_tokens = self.model_setting.context_length - token_counts - 64
|
803
811
|
|
812
|
+
if "o3-mini" in self.model_id:
|
813
|
+
max_completion_tokens = max_tokens
|
814
|
+
max_tokens = NOT_GIVEN
|
815
|
+
|
804
816
|
await self._acquire_rate_limit(self.endpoint, self.model, messages)
|
805
817
|
|
806
818
|
if self.stream:
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|