vectorvein 0.1.40__tar.gz → 0.1.42__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.1.40 → vectorvein-0.1.42}/PKG-INFO +5 -1
- {vectorvein-0.1.40 → vectorvein-0.1.42}/pyproject.toml +8 -1
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/utils.py +29 -4
- vectorvein-0.1.42/src/vectorvein/server/token_server.py +47 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/settings/__init__.py +2 -1
- vectorvein-0.1.42/src/vectorvein/utilities/media_processing.py +148 -0
- vectorvein-0.1.42/src/vectorvein/utilities/retry.py +62 -0
- vectorvein-0.1.40/src/vectorvein/utilities/media_processing.py +0 -70
- vectorvein-0.1.40/src/vectorvein/utilities/retry.py +0 -36
- {vectorvein-0.1.40 → vectorvein-0.1.42}/README.md +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/types/py.typed +0 -0
{vectorvein-0.1.40 → vectorvein-0.1.42}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.1.40
+Version: 0.1.42
 Summary: Default template for PDM package
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
@@ -13,6 +13,10 @@ Requires-Dist: pydantic>=2.8.2
 Requires-Dist: Pillow>=10.4.0
 Requires-Dist: deepseek-tokenizer>=0.1.0
 Requires-Dist: qwen-tokenizer>=0.2.0
+Requires-Dist: google-auth>=2.35.0
+Provides-Extra: server
+Requires-Dist: fastapi; extra == "server"
+Requires-Dist: uvicorn; extra == "server"
 Description-Content-Type: text/markdown
 
 # vectorvein
```
{vectorvein-0.1.40 → vectorvein-0.1.42}/pyproject.toml

```diff
@@ -11,16 +11,23 @@ dependencies = [
     "Pillow>=10.4.0",
     "deepseek-tokenizer>=0.1.0",
     "qwen-tokenizer>=0.2.0",
+    "google-auth>=2.35.0",
 ]
 description = "Default template for PDM package"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.1.40"
+version = "0.1.42"
 
 [project.license]
 text = "MIT"
 
+[project.optional-dependencies]
+server = [
+    "fastapi",
+    "uvicorn",
+]
+
 [build-system]
 build-backend = "pdm.backend"
 requires = [
```
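The FastAPI/uvicorn stack for the new token server ships as an optional `server` extra (`pip install "vectorvein[server]"`), keeping the base install lean. A hypothetical guard a consumer might use, not part of the package itself:

```python
# Assumption: fastapi/uvicorn are only importable when the "server" extra is installed.
try:
    from vectorvein.server.token_server import run_token_server
except ImportError:
    run_token_server = None  # server extra not installed
```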
{vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/chat_clients/utils.py

```diff
@@ -4,16 +4,14 @@ import re
 import json
 from math import ceil
 from typing import Iterable
+
 import httpx
 import tiktoken
 from anthropic import Anthropic
-from qwen_tokenizer import get_tokenizer
-from deepseek_tokenizer import deepseek_tokenizer
 
 from ..settings import settings
 from ..utilities.retry import Retry
 from ..types.enums import BackendType
-from ..utilities.media_processing import ImageProcessor
 from ..types.llm_parameters import (
     NotGiven,
     NOT_GIVEN,
@@ -116,7 +114,20 @@ def convert_type(value, value_type):
     return value  # If the type is unknown, return the value unchanged
 
 
-def get_token_counts(text: str | dict, model: str = "") -> int:
+def get_token_counts(text: str | dict, model: str = "", use_token_server_first: bool = False) -> int:
+    if use_token_server_first and settings.token_server is not None:
+        _, response = (
+            Retry(httpx.post)
+            .args(url=settings.token_server, json={"text": text, "model": model}, timeout=None)
+            .retry_times(5)
+            .sleep_time(1)
+            .run()
+        )
+        if response is None:
+            return 1000
+        result = response.json()
+        return result["total_tokens"]
+
     if not isinstance(text, str):
         text = str(text)
     if model == "gpt-3.5-turbo":
```
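Note on the new fast path: when `use_token_server_first` is set and `settings.token_server` is configured, counting is delegated to the token server (wrapped in `Retry`, 5 attempts, 1s apart), falling back to a flat estimate of 1000 tokens if no response is obtained. A minimal client-side sketch, assuming a server is already listening; the address is illustrative, and since the configured value is passed to `httpx.post` verbatim as the URL, it appears it must be a full URL including the `/count_tokens` route:

```python
from vectorvein.settings import settings
from vectorvein.chat_clients.utils import get_token_counts

# Illustrative address (assumption): used verbatim as the POST URL.
settings.token_server = "http://127.0.0.1:8089/count_tokens"

# Tries the token server first; falls back to the constant 1000 if all retries fail.
count = get_token_counts(
    {"role": "user", "content": "hello"},
    model="gpt-4o",
    use_token_server_first=True,
)
```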
@@ -128,6 +139,8 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
|
|
128
139
|
if len(model_setting.endpoints) == 0:
|
129
140
|
return int(len(text) / 1.33)
|
130
141
|
endpoint_id = model_setting.endpoints[0]
|
142
|
+
if isinstance(endpoint_id, dict):
|
143
|
+
endpoint_id = endpoint_id["endpoint_id"]
|
131
144
|
endpoint = settings.get_endpoint(endpoint_id)
|
132
145
|
tokenize_url = "https://api.minimax.chat/v1/tokenize"
|
133
146
|
headers = {"Authorization": f"Bearer {endpoint.api_key}", "Content-Type": "application/json"}
|
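The `isinstance(endpoint_id, dict)` guard added here (and repeated in the Moonshot, Gemini, and StepFun branches below) means a model's `endpoints` list may now contain either bare endpoint-id strings or dicts with an `endpoint_id` key. A sketch of the two accepted shapes (the ids are illustrative):

```python
# Both forms resolve to the same endpoint lookup:
endpoints = ["minimax-default"]                    # old style: bare id string
endpoints = [{"endpoint_id": "minimax-default"}]   # new style: dict entry

endpoint_id = endpoints[0]
if isinstance(endpoint_id, dict):
    endpoint_id = endpoint_id["endpoint_id"]
assert endpoint_id == "minimax-default"
```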
@@ -156,6 +169,8 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
|
|
156
169
|
if len(model_setting.endpoints) == 0:
|
157
170
|
return len(get_gpt_35_encoding().encode(text))
|
158
171
|
endpoint_id = model_setting.endpoints[0]
|
172
|
+
if isinstance(endpoint_id, dict):
|
173
|
+
endpoint_id = endpoint_id["endpoint_id"]
|
159
174
|
endpoint = settings.get_endpoint(endpoint_id)
|
160
175
|
tokenize_url = "https://api.moonshot.cn/v1/tokenizers/estimate-token-count"
|
161
176
|
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {endpoint.api_key}"}
|
@@ -181,6 +196,8 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
|
|
181
196
|
if len(model_setting.endpoints) == 0:
|
182
197
|
return len(get_gpt_35_encoding().encode(text))
|
183
198
|
endpoint_id = model_setting.endpoints[0]
|
199
|
+
if isinstance(endpoint_id, dict):
|
200
|
+
endpoint_id = endpoint_id["endpoint_id"]
|
184
201
|
endpoint = settings.get_endpoint(endpoint_id)
|
185
202
|
url = f"{endpoint.api_base}/models/{model_setting.id}:countTokens"
|
186
203
|
params = {"key": endpoint.api_key}
|
@@ -206,8 +223,12 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
|
|
206
223
|
elif model.startswith("claude"):
|
207
224
|
return Anthropic().count_tokens(text)
|
208
225
|
elif model.startswith("deepseek"):
|
226
|
+
from deepseek_tokenizer import deepseek_tokenizer
|
227
|
+
|
209
228
|
return len(deepseek_tokenizer.encode(text))
|
210
229
|
elif model.startswith("qwen"):
|
230
|
+
from qwen_tokenizer import get_tokenizer
|
231
|
+
|
211
232
|
qwen_tokenizer = get_tokenizer(model)
|
212
233
|
return len(qwen_tokenizer.encode(text))
|
213
234
|
elif model.startswith("stepfun"):
|
@@ -215,6 +236,8 @@ def get_token_counts(text: str | dict, model: str = "") -> int:
|
|
215
236
|
if len(model_setting.endpoints) == 0:
|
216
237
|
return len(get_gpt_35_encoding().encode(text))
|
217
238
|
endpoint_id = model_setting.endpoints[0]
|
239
|
+
if isinstance(endpoint_id, dict):
|
240
|
+
endpoint_id = endpoint_id["endpoint_id"]
|
218
241
|
endpoint = settings.get_endpoint(endpoint_id)
|
219
242
|
tokenize_url = "https://api.stepfun.com/v1/token/count"
|
220
243
|
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {endpoint.api_key}"}
|
@@ -372,6 +395,8 @@ def cutoff_messages(
|
|
372
395
|
|
373
396
|
|
374
397
|
def format_image_message(image: str, backend: BackendType = BackendType.OpenAI) -> dict:
|
398
|
+
from ..utilities.media_processing import ImageProcessor
|
399
|
+
|
375
400
|
image_processor = ImageProcessor(image_source=image)
|
376
401
|
if backend == BackendType.OpenAI:
|
377
402
|
return {
|
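The common thread in these hunks: `deepseek_tokenizer`, `qwen_tokenizer`, and `ImageProcessor` (which pulls in Pillow) are now imported inside the functions that use them rather than at module import time, so importing the utils module gets cheaper. A usage sketch for `format_image_message` (the URL is illustrative):

```python
from vectorvein.chat_clients.utils import format_image_message
from vectorvein.types.enums import BackendType

# ImageProcessor (and Pillow) load lazily on the first call.
message = format_image_message("https://example.com/cat.png", backend=BackendType.OpenAI)
```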
vectorvein-0.1.42/src/vectorvein/server/token_server.py (new file)

```diff
@@ -0,0 +1,47 @@
+import uvicorn
+from pydantic import BaseModel
+from fastapi import FastAPI, HTTPException
+
+from ..settings import settings
+from ..chat_clients.utils import get_token_counts
+
+token_server = FastAPI()
+
+
+class TokenCountRequest(BaseModel):
+    text: str | dict
+    model: str = ""
+
+
+@token_server.post("/count_tokens")
+async def count_tokens(request: TokenCountRequest):
+    try:
+        token_count = get_token_counts(request.text, request.model, use_token_server_first=False)
+        return {"total_tokens": token_count}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+def run_token_server(host: str | None = None, port: int | None = None):
+    """
+    Start a simple HTTP server that handles token-counting requests. If both arguments are left unset, the settings.token_server configuration is used.
+
+    Args:
+        host (str): Server host address.
+        port (int): Server port.
+    """
+    if host is None or port is None:
+        token_server_url = settings.token_server
+        if token_server_url is None:
+            raise ValueError("Token server is not enabled.")
+
+        _host, _port = token_server_url.split(":")
+    else:
+        _host = host
+        _port = port
+
+    uvicorn.run(token_server, host=_host, port=int(_port))
+
+
+if __name__ == "__main__":
+    run_token_server()
```
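A sketch of running the new server and querying it over HTTP; host and port are illustrative, and the server is launched in a child process since `run_token_server` blocks:

```python
import time
import multiprocessing

import httpx

from vectorvein.server.token_server import run_token_server

# Start the server on an explicit host/port (illustrative values).
proc = multiprocessing.Process(target=run_token_server, args=("127.0.0.1", 8089), daemon=True)
proc.start()
time.sleep(1)  # give uvicorn a moment to bind

resp = httpx.post("http://127.0.0.1:8089/count_tokens", json={"text": "hello world", "model": "gpt-4o"})
print(resp.json())  # {"total_tokens": ...}
proc.terminate()
```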
{vectorvein-0.1.40 → vectorvein-0.1.42}/src/vectorvein/settings/__init__.py

```diff
@@ -1,6 +1,6 @@
 # @Author: Bi Ying
 # @Date: 2024-07-27 00:30:56
-from typing import List, Dict
+from typing import List, Dict, Optional
 
 from pydantic import BaseModel, Field
 
@@ -13,6 +13,7 @@ class Settings(BaseModel):
     endpoints: List[EndpointSetting] = Field(
         default_factory=list, description="Available endpoints for the LLM service."
     )
+    token_server: Optional[str] = Field(default=None, description="Token server address. Format: host:port")
 
     anthropic: BackendSettings = Field(default_factory=BackendSettings, description="Anthropic models settings.")
     deepseek: BackendSettings = Field(default_factory=BackendSettings, description="Deepseek models settings.")
```
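Since `Settings` is a plain pydantic model, the new field is set like any other; a minimal sketch, assuming the remaining fields keep their defaults (the address is illustrative):

```python
from vectorvein.settings import Settings

# token_server defaults to None, i.e. the remote counting fast path stays disabled.
cfg = Settings(token_server="127.0.0.1:8089")
print(cfg.token_server)  # "127.0.0.1:8089"
```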
vectorvein-0.1.42/src/vectorvein/utilities/media_processing.py (new file)

```diff
@@ -0,0 +1,148 @@
+# @Author: Bi Ying
+# @Date: 2024-07-27 12:03:49
+import base64
+from io import BytesIO
+from pathlib import Path
+from functools import cached_property
+
+import httpx
+from PIL import Image
+from PIL.ImageFile import ImageFile
+
+
+class ImageProcessor:
+    def __init__(
+        self,
+        image_source: Image.Image | str | Path,
+        max_size: int | None = 5 * 1024 * 1024,
+        max_width: int | None = None,
+        max_height: int | None = None,
+    ):
+        self.image_source = image_source
+        if isinstance(image_source, (Image.Image, Path)):
+            self.is_local = True
+        else:
+            self.is_local = not image_source.startswith("http")
+        self.max_size = max_size
+        self.max_width = max_width
+        self.max_height = max_height
+        self._image = self._load_image()
+        self._image_format = self._image.format or "JPEG"
+        self._cached_bytes = None
+        self._cached_base64_image = None
+
+    def _load_image(self):
+        if not self.is_local and isinstance(self.image_source, str):
+            image_url = self.image_source
+            response = httpx.get(image_url)
+            return Image.open(BytesIO(response.content))
+        elif isinstance(self.image_source, Path):
+            return Image.open(self.image_source)
+        elif isinstance(self.image_source, Image.Image):
+            return self.image_source
+        else:
+            raise ValueError(f"Unsupported image source type: {type(self.image_source)}")
+
+    def _resize_image(
+        self,
+        img: ImageFile | Image.Image,
+        max_size: int | None = None,
+        max_width: int | None = None,
+        max_height: int | None = None,
+    ):
+        img_bytes = BytesIO()
+        image_format = img.format or "JPEG"
+        _img = img.copy()
+        _img.save(img_bytes, format=image_format, optimize=True)
+
+        if max_width is not None and _img.width > max_width:
+            new_size = (max_width, int(max_width * _img.height / _img.width))
+            _img = _img.resize(new_size, Image.Resampling.LANCZOS)
+
+        if max_height is not None and _img.height > max_height:
+            new_size = (int(max_height * _img.width / _img.height), max_height)
+            _img = _img.resize(new_size, Image.Resampling.LANCZOS)
+
+        img_bytes = BytesIO()
+        _img.save(img_bytes, format=image_format, optimize=True)
+
+        if max_size is not None and img_bytes.getbuffer().nbytes <= max_size:
+            return img_bytes
+
+        original_size = _img.size
+        scale_factor = 0.9
+
+        while True:
+            new_size = (int(original_size[0] * scale_factor), int(original_size[1] * scale_factor))
+            img_resized = _img.resize(new_size, Image.Resampling.LANCZOS)
+
+            img_bytes_resized = BytesIO()
+            img_resized.save(img_bytes_resized, format=image_format, optimize=True)
+
+            if max_size is not None and img_bytes_resized.getbuffer().nbytes <= max_size:
+                return img_bytes_resized
+
+            scale_factor -= 0.1
+            if scale_factor < 0.1:
+                return img_bytes_resized
+
+    @property
+    def bytes(self):
+        if self._cached_bytes is not None:
+            return self._cached_bytes
+        if self.max_size is None and self.max_width is None and self.max_height is None:
+            if isinstance(self._image, Image.Image):
+                img_bytes = BytesIO()
+
+                # Check whether the image has an alpha channel
+                has_transparency = self._image.mode in ("RGBA", "LA") or (
+                    self._image.mode == "P" and "transparency" in self._image.info
+                )
+
+                if has_transparency:
+                    # If there is an alpha channel, use PNG
+                    save_format = "PNG"
+                    self._image_format = "PNG"
+                else:
+                    # Otherwise use the original format, defaulting to JPEG
+                    save_format = self._image.format or self._image_format or "JPEG"
+
+                # If the image mode is not RGB (e.g. RGBA), convert it to RGB
+                if self._image.mode != "RGB":
+                    self._image = self._image.convert("RGB")
+
+                self._image.save(img_bytes, format=save_format, optimize=True)
+                self._cached_bytes = img_bytes.getvalue()
+                return self._cached_bytes
+            elif isinstance(self._image, BytesIO):
+                self._cached_bytes = self._image.getvalue()
+                return self._cached_bytes
+            elif isinstance(self._image, ImageFile):
+                if self._image.fp is None:
+                    raise ValueError("Image file is not open")
+                self._cached_bytes = self._image.fp.read()
+                return self._cached_bytes
+
+            self._cached_bytes = self._image.getvalue()
+            return self._cached_bytes
+
+        img_bytes_resized = self._resize_image(self._image, self.max_size, self.max_width, self.max_height)
+        return img_bytes_resized.getvalue()
+
+    @property
+    def base64_image(self):
+        if self.max_size is None and self.max_width is None and self.max_height is None:
+            self._cached_base64_image = base64.b64encode(self.bytes).decode()
+            return self._cached_base64_image
+
+        img_bytes_resized = self._resize_image(self._image, self.max_size, self.max_width, self.max_height)
+        self._cached_base64_image = base64.b64encode(img_bytes_resized.getvalue()).decode()
+        return self._cached_base64_image
+
+    @property
+    def mime_type(self):
+        return Image.MIME[self._image_format]
+
+    @cached_property
+    def data_url(self):
+        return f"data:{self.mime_type};base64,{self.base64_image}"
```
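A usage sketch for the rewritten `ImageProcessor`, exercising the new `max_width` constraint (the file path is illustrative):

```python
from pathlib import Path

from vectorvein.utilities.media_processing import ImageProcessor

processor = ImageProcessor(
    image_source=Path("photo.jpg"),  # also accepts a PIL.Image or an http(s) URL string
    max_size=5 * 1024 * 1024,        # re-encode/downscale until under 5 MiB
    max_width=1024,                  # and at most 1024 px wide
)
print(processor.mime_type)     # e.g. "image/jpeg"
data_url = processor.data_url  # "data:image/jpeg;base64,..."
```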
vectorvein-0.1.42/src/vectorvein/utilities/retry.py (new file)

```diff
@@ -0,0 +1,62 @@
+# @Author: Bi Ying
+# @Date: 2024-06-07 16:16:49
+import time
+from typing import Optional, Any, Callable, Tuple, Union, TypeVar, Generic
+
+
+ResultType = TypeVar("ResultType")
+
+
+class Retry(Generic[ResultType]):
+    def __init__(self, function: Callable[..., ResultType]):
+        self.function: Callable[..., ResultType] = function
+        self.__retry_times: int = 3
+        self.__sleep_time: Union[int, float] = 1
+        self.__timeout: int = 180
+        self.__result_check: Optional[Callable[[ResultType], bool]] = None
+        self.pargs: list = []
+        self.kwargs: dict = {}
+
+    def args(self, *args: Any, **kwargs: Any) -> "Retry[ResultType]":
+        self.pargs = list(args)
+        self.kwargs = kwargs
+        return self
+
+    def retry_times(self, retry_times: int) -> "Retry[ResultType]":
+        self.__retry_times = retry_times
+        return self
+
+    def sleep_time(self, sleep_time: Union[int, float]) -> "Retry[ResultType]":
+        self.__sleep_time = sleep_time
+        return self
+
+    def result_check(self, check_function: Callable[[ResultType], bool]) -> "Retry[ResultType]":
+        self.__result_check = check_function
+        return self
+
+    def _check_result(self, result: ResultType) -> bool:
+        try:
+            if self.__result_check is None:
+                return True
+            return self.__result_check(result)
+        except Exception as e:
+            print(f"Retry result check error: {e}")
+            return False
+
+    def run(self) -> Tuple[bool, Optional[ResultType]]:
+        try_times = 0
+        start_time = time.time()
+
+        while try_times <= self.__retry_times and time.time() - start_time < self.__timeout:
+            try:
+                result: ResultType = self.function(*self.pargs, **self.kwargs)
+                if self._check_result(result):
+                    return True, result
+                try_times += 1
+                time.sleep(self.__sleep_time)
+            except Exception as e:
+                print(f"{self.function.__name__} function error: {e}")
+                try_times += 1
+                time.sleep(self.__sleep_time)
+
+        return False, None
```
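Compared with the old helper removed below, the rewrite adds generics, a 180s overall timeout, and a `result_check` hook that can reject a returned value and trigger another attempt. A usage sketch (the URL is illustrative):

```python
import httpx

from vectorvein.utilities.retry import Retry

# Retry a GET up to 5 times, 1s apart; non-2xx responses count as failures.
success, response = (
    Retry(httpx.get)
    .args("https://example.com/health")
    .retry_times(5)
    .sleep_time(1)
    .result_check(lambda resp: resp.status_code == 200)
    .run()
)
if not success:
    print("gave up after retries or the 180s timeout")
```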
vectorvein-0.1.40/src/vectorvein/utilities/media_processing.py (removed)

```diff
@@ -1,70 +0,0 @@
-# @Author: Bi Ying
-# @Date: 2024-07-27 12:03:49
-import base64
-from io import BytesIO
-from pathlib import Path
-from functools import cached_property
-
-import httpx
-from PIL import Image
-
-
-
-class ImageProcessor:
-    def __init__(self, image_source: Image.Image | str | Path, max_size: int | None = 5 * 1024 * 1024):
-        self.image_source = image_source
-        if isinstance(image_source, (Image.Image, Path)):
-            self.is_local = True
-        else:
-            self.is_local = not image_source.startswith("http")
-        self.max_size = max_size
-        self._image = self._load_image()
-
-    def _load_image(self):
-        if not self.is_local:
-            image_url = self.image_source
-            response = httpx.get(image_url)
-            return Image.open(BytesIO(response.content))
-        else:
-            return Image.open(self.image_source)
-
-    def _resize_image(self, img, max_size):
-        img_bytes = BytesIO()
-        img.save(img_bytes, format=img.format, optimize=True)
-
-        if img_bytes.getbuffer().nbytes <= max_size:
-            return img_bytes
-
-        original_size = img.size
-        scale_factor = 0.9
-
-        while True:
-            new_size = (int(original_size[0] * scale_factor), int(original_size[1] * scale_factor))
-            img_resized = img.resize(new_size, Image.Resampling.LANCZOS)
-
-            img_bytes_resized = BytesIO()
-            img_resized.save(img_bytes_resized, format=img.format, optimize=True)
-
-            if img_bytes_resized.getbuffer().nbytes <= max_size:
-                return img_bytes_resized
-
-            scale_factor -= 0.1
-            if scale_factor < 0.1:
-                return img_bytes_resized
-
-    @cached_property
-    def base64_image(self):
-        if self.max_size is None:
-            return base64.b64encode(self._image.getvalue()).decode()
-
-        img_bytes_resized = self._resize_image(self._image, self.max_size)
-        return base64.b64encode(img_bytes_resized.getvalue()).decode()
-
-    @cached_property
-    def mime_type(self):
-        return Image.MIME[self._image.format]
-
-    @cached_property
-    def data_url(self):
-        return f"data:{self.mime_type};base64,{self.base64_image}"
-
```
vectorvein-0.1.40/src/vectorvein/utilities/retry.py (removed)

```diff
@@ -1,36 +0,0 @@
-# @Author: Bi Ying
-# @Date: 2024-08-14 13:03:10
-import time
-
-
-class Retry:
-    def __init__(self, function):
-        self.function = function
-        self.__retry_times = 3
-        self.__sleep_time = 1
-        self.pargs = []
-        self.kwargs = {}
-
-    def args(self, *args, **kwargs):
-        self.pargs = args
-        self.kwargs = kwargs
-        return self
-
-    def retry_times(self, retry_times: int):
-        self.__retry_times = retry_times
-        return self
-
-    def sleep_time(self, sleep_time):
-        self.__sleep_time = sleep_time
-        return self
-
-    def run(self):
-        try_times = 0
-        while try_times < self.__retry_times:
-            try:
-                return True, self.function(*self.pargs, **self.kwargs)
-            except Exception as e:
-                print(f"{self.function.__name__} function error: {e}")
-                try_times += 1
-                time.sleep(self.__sleep_time)
-        return False, None
```
All remaining files listed above are unchanged; they were only renamed between the version directories.