vectorvein 0.2.85__py3-none-any.whl → 0.2.86__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/chat_clients/openai_compatible_client.py +10 -28
- vectorvein/utilities/media_processing.py +62 -3
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.86.dist-info}/METADATA +1 -1
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.86.dist-info}/RECORD +6 -6
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.86.dist-info}/WHEEL +0 -0
- {vectorvein-0.2.85.dist-info → vectorvein-0.2.86.dist-info}/entry_points.txt +0 -0
vectorvein/chat_clients/openai_compatible_client.py
@@ -99,7 +99,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
             return AzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="2025-
+                api_version="2025-04-01-preview",
                 http_client=self.http_client,
             )
         else:
@@ -327,9 +327,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
         if not max_tokens and not max_completion_tokens:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
-            token_counts = get_message_token_counts(
-                messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-            )
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -373,9 +371,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )

@@ -501,9 +497,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )

@@ -524,10 +518,7 @@ class OpenAICompatibleChatClient(BaseChatClient):

         if tools:
             if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
-                result["tool_calls"] = [
-                    {**tool_call.model_dump(), "type": "function"}
-                    for tool_call in response.choices[0].message.tool_calls
-                ]
+                result["tool_calls"] = [{**tool_call.model_dump(), "type": "function"} for tool_call in response.choices[0].message.tool_calls]
             else:
                 tool_call_content_processor = ToolCallContentProcessor(result["content"])
                 tool_call_data = tool_call_content_processor.tool_calls
@@ -581,7 +572,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
             return AsyncAzureOpenAI(
                 azure_endpoint=self.endpoint.api_base,
                 api_key=self.endpoint.api_key,
-                api_version="2025-
+                api_version="2025-04-01-preview",
                 http_client=self.http_client,
             )
         else:
@@ -809,9 +800,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
         if not max_tokens and not max_completion_tokens:
             max_output_tokens = self.model_setting.max_output_tokens
             native_multimodal = self.model_setting.native_multimodal
-            token_counts = get_message_token_counts(
-                messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-            )
+            token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
             if max_output_tokens is not None:
                 max_tokens = self.model_setting.context_length - token_counts - 64
                 max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -855,9 +844,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )

@@ -983,9 +970,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                 extra_body=extra_body,
                 timeout=timeout,
                 stream_options=stream_options,
-                response_format=response_format
-                if response_format and self.model_setting.response_format_available
-                else OPENAI_NOT_GIVEN,
+                response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                 **tools_params,  # type: ignore
             )

@@ -1006,10 +991,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):

         if tools:
             if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
-                result["tool_calls"] = [
-                    {**tool_call.model_dump(), "type": "function"}
-                    for tool_call in response.choices[0].message.tool_calls
-                ]
+                result["tool_calls"] = [{**tool_call.model_dump(), "type": "function"} for tool_call in response.choices[0].message.tool_calls]
             else:
                 tool_call_content_processor = ToolCallContentProcessor(result["content"])
                 tool_call_data = tool_call_content_processor.tool_calls
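The token-budget hunks above only reflow the call to get_message_token_counts onto one line; the fallback max_tokens computation itself is unchanged. As a quick standalone illustration of that clamping (the numbers below are hypothetical, standing in for model_setting.context_length, model_setting.max_output_tokens, and the get_message_token_counts(...) result):

    context_length = 128_000
    max_output_tokens = 8_192
    token_counts = 1_500

    max_tokens = context_length - token_counts - 64          # keep a 64-token safety margin
    max_tokens = min(max(max_tokens, 1), max_output_tokens)  # clamp into [1, max_output_tokens]
    print(max_tokens)  # 8192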
vectorvein/utilities/media_processing.py
@@ -3,6 +3,7 @@
 import base64
 from io import BytesIO
 from pathlib import Path
+from typing import Literal
 from functools import cached_property

 import httpx
@@ -93,6 +94,66 @@ class ImageProcessor:
         if scale_factor < 0.1:
             return img_bytes_resized

+    def _clear_cache(self):
+        self._cached_bytes = None
+        self._cached_base64_image = None
+
+    def convert_format(self, target_format: Literal["JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"]):
+        """
+        Convert the image to another format.
+
+        This method converts the image to the specified format. Supported formats include 'JPEG', 'PNG', 'GIF', 'BMP', 'WEBP', and so on.
+
+        Parameters:
+        -----
+        target_format : str
+            String name of the target format, case-insensitive, e.g. 'png', 'jpeg', 'gif'.
+
+        Returns:
+        -----
+        self
+            Returns the class instance, so calls can be chained.
+
+        Raises:
+        -----
+        ValueError
+            If the specified format is not supported.
+
+        Examples:
+        -----
+        >>> img.convert_format('png')   # convert the image to PNG
+        >>> img.convert_format('JPEG')  # convert the image to JPEG
+        """
+
+        # Check whether the format is supported
+        supported_formats = ["JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"]
+        if target_format not in supported_formats:
+            raise ValueError(f"Unsupported format: {target_format}. Supported formats are: {', '.join(supported_formats)}")
+
+        # Skip the conversion if the current format already matches the target format
+        if self._image.format == target_format:
+            return self
+
+        # Create a new byte stream
+        img_bytes = BytesIO()
+
+        # Handle the alpha channel
+        if target_format == "JPEG" and self._image.mode in ("RGBA", "LA"):
+            # JPEG does not support an alpha channel, so convert to RGB
+            self._image = self._image.convert("RGB")
+
+        # Save in the new format
+        self._image.save(img_bytes, format=target_format, optimize=True)
+        img_bytes.seek(0)
+
+        # Reload the image in the new format
+        self._image = Image.open(img_bytes)
+        self._image_format = target_format
+
+        # Clear the cached outputs
+        self._clear_cache()
+        return self
+
     @property
     def bytes(self):
         if self._cached_bytes is not None:
@@ -102,9 +163,7 @@ class ImageProcessor:
         img_bytes = BytesIO()

         # Check whether the image has an alpha channel
-        has_transparency = self._image.mode in ("RGBA", "LA") or (
-            self._image.mode == "P" and "transparency" in self._image.info
-        )
+        has_transparency = self._image.mode in ("RGBA", "LA") or (self._image.mode == "P" and "transparency" in self._image.info)

         if has_transparency:
             # If there is an alpha channel, use the PNG format
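The convert_format addition above is essentially a Pillow round-trip through an in-memory buffer. Below is a minimal standalone sketch of the same conversion steps using plain Pillow with a synthetic sample image, not the ImageProcessor wrapper itself (how an ImageProcessor instance is constructed is not shown in this diff):

    from io import BytesIO
    from PIL import Image

    # Synthetic RGBA image standing in for ImageProcessor._image
    img = Image.new("RGBA", (64, 64), (255, 0, 0, 128))

    target_format = "JPEG"
    if target_format == "JPEG" and img.mode in ("RGBA", "LA"):
        img = img.convert("RGB")  # JPEG cannot store an alpha channel

    buffer = BytesIO()
    img.save(buffer, format=target_format, optimize=True)
    buffer.seek(0)

    converted = Image.open(buffer)
    print(converted.format, converted.mode)  # JPEG RGB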
{vectorvein-0.2.85.dist-info → vectorvein-0.2.86.dist-info}/RECORD
@@ -1,6 +1,6 @@
-vectorvein-0.2.
-vectorvein-0.2.
-vectorvein-0.2.
+vectorvein-0.2.86.dist-info/METADATA,sha256=ZprAIAU9UT5xHzgCmYN1CAFWjddGwQXuoes1UmLnmU0,4567
+vectorvein-0.2.86.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+vectorvein-0.2.86.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
 vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
 vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
@@ -19,7 +19,7 @@ vectorvein/chat_clients/minimax_client.py,sha256=YOILWcsHsN5tihLTMbKJIyJr9TJREMI
 vectorvein/chat_clients/mistral_client.py,sha256=1aKSylzBDaLYcFnaBIL4-sXSzWmXfBeON9Q0rq-ziWw,534
 vectorvein/chat_clients/moonshot_client.py,sha256=gbu-6nGxx8uM_U2WlI4Wus881rFRotzHtMSoYOcruGU,526
 vectorvein/chat_clients/openai_client.py,sha256=Nz6tV45pWcsOupxjnsRsGTicbQNJWIZyxuJoJ5DGMpg,527
-vectorvein/chat_clients/openai_compatible_client.py,sha256=
+vectorvein/chat_clients/openai_compatible_client.py,sha256=0nj5DwnY8llctoxbW-UXsqVjTRiuezAgkISnV_ygnA4,48869
 vectorvein/chat_clients/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/chat_clients/qwen_client.py,sha256=-ryh-m9PgsO0fc4ulcCmPTy1155J8YUy15uPoJQOHA0,513
 vectorvein/chat_clients/stepfun_client.py,sha256=zsD2W5ahmR4DD9cqQTXmJr3txrGuvxbRWhFlRdwNijI,519
@@ -38,7 +38,7 @@ vectorvein/types/exception.py,sha256=KtnqZ-1DstHm95SZAyZdHhkGq1bJ4A9Aw3Zfdu-VIFo
 vectorvein/types/llm_parameters.py,sha256=Fo_-_rFkBvl_dUsCZxy2xNMC9Q4qVGLzDBL9jRVbXIQ,8489
 vectorvein/types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vectorvein/types/settings.py,sha256=5wINBiT9qj3ZwDiE6Kf4r6wD3yY1bKN40xxZ7F6jEfI,4844
-vectorvein/utilities/media_processing.py,sha256=
+vectorvein/utilities/media_processing.py,sha256=TPm73MS_a1WVqpYv0N9BrZcOK-kxWa3QHSFUI_7WgmU,8257
 vectorvein/utilities/rate_limiter.py,sha256=dwolIUVw2wP83Odqpx0AAaE77de1GzxkYDGH4tM_u_4,10300
 vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
 vectorvein/workflow/graph/edge.py,sha256=1ckyyjCue_PLm7P1ItUfKOy6AKkemOpZ9m1WJ8UXIHQ,1072
@@ -65,4 +65,4 @@ vectorvein/workflow/utils/analyse.py,sha256=msmvyz35UTYTwqQR5sg9H0sm1vxmGDSmep9X
 vectorvein/workflow/utils/check.py,sha256=B_NdwqIqnc7Ko2HHqFpfOmWVaAu21tPITe0szKfiZKc,11414
 vectorvein/workflow/utils/json_to_code.py,sha256=P8dhhSNgKhTnW17qXNjLO2aLdb0rA8qMAWxhObol2TU,7295
 vectorvein/workflow/utils/layout.py,sha256=j0bRD3uaXu40xCS6U6BGahBI8FrHa5MiF55GbTrZ1LM,4565
-vectorvein-0.2.
+vectorvein-0.2.86.dist-info/RECORD,,
{vectorvein-0.2.85.dist-info → vectorvein-0.2.86.dist-info}/WHEEL
File without changes

{vectorvein-0.2.85.dist-info → vectorvein-0.2.86.dist-info}/entry_points.txt
File without changes