MeUtils 2025.9.9.19.39.12__py3-none-any.whl → 2025.9.11.12.22.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/_openaisdk/openai_images.py +16 -6
- examples/_openaisdk/openai_router.py +9 -7
- meutils/apis/chatglm/glm_video_api.py +2 -2
- meutils/apis/fal/images.py +1 -1
- meutils/apis/images/generations.py +46 -3
- meutils/apis/volcengine_apis/videos.py +3 -2
- meutils/data/VERSION +1 -1
- meutils/io/files_utils.py +34 -8
- meutils/llm/check_utils.py +1 -2
- meutils/llm/completions/qwenllm.py +2 -2
- meutils/llm/models/siliconflow.py +4 -0
- meutils/llm/openai_utils/adapters.py +12 -2
- meutils/office_automation/pdf.py +3 -2
- meutils/schemas/image_types.py +52 -7
- meutils/schemas/oneapi/common.py +20 -7
- {meutils-2025.9.9.19.39.12.dist-info → meutils-2025.9.11.12.22.1.dist-info}/METADATA +261 -261
- {meutils-2025.9.9.19.39.12.dist-info → meutils-2025.9.11.12.22.1.dist-info}/RECORD +21 -21
- {meutils-2025.9.9.19.39.12.dist-info → meutils-2025.9.11.12.22.1.dist-info}/WHEEL +0 -0
- {meutils-2025.9.9.19.39.12.dist-info → meutils-2025.9.11.12.22.1.dist-info}/entry_points.txt +0 -0
- {meutils-2025.9.9.19.39.12.dist-info → meutils-2025.9.11.12.22.1.dist-info}/licenses/LICENSE +0 -0
- {meutils-2025.9.9.19.39.12.dist-info → meutils-2025.9.11.12.22.1.dist-info}/top_level.txt +0 -0
examples/_openaisdk/openai_images.py
CHANGED
@@ -50,7 +50,6 @@ data = {
     # 'model': 'step-1x-medium',
     'model': 'seededit',
 
-
     # 'n': 2, 'quality': 'hd', 'response_format': 'url', 'size': '1024x1024', 'style': 'vivid',
     # 'extra_body': {'guidance_scale': 4.5, 'num_inference_steps': 25, 'seed': None, 'negative_prompt': None}
 }
@@ -132,14 +131,20 @@ with timer('image'):
 
     client = OpenAI(
 
-        api_key=os.getenv("FFIRE_API_KEY"),
-        base_url=os.getenv("FFIRE_BASE_URL"),
+        # api_key=os.getenv("FFIRE_API_KEY"),
+        # base_url=os.getenv("FFIRE_BASE_URL"),
 
         # api_key="6b419ce2-096c-44ce-b2f5-0914ee8f3cf8",
         # base_url=os.getenv("VOLC_BASE_URL")
 
         # api_key=os.getenv("OPENAI_API_KEY") +,
         # api_key=os.getenv("OPENAI_API_KEY") + "-359",  # 3083
+        # api_key=os.getenv("OPENAI_API_KEY") + "-21227",  # 3083
+
+        base_url="https://api.pisces.ink/v1",
+        # base_url="https://all.chatfire.cn/pisces/v1",
+        api_key="pisces-76e2d5da837d4575847abc06bda84d200"
+
         # api_key=os.getenv("OPENAI_API_KEY_OPENAI") + "-3083",
 
         # api_key=os.getenv("SILICONFLOW_API_KEY"),
@@ -206,19 +211,21 @@ with timer('image'):
     # model = "recraft-v3"
     # model = "fal-ai/recraft-v3"
     # model = "flux-pro-1.1-ultra"
-    prompt = '一只可爱的边牧在坐公交车,卡通贴纸。动漫3D风格,超写实油画,超高分辨率,最好的质量,8k'
-
+    # prompt = '一只可爱的边牧在坐公交车,卡通贴纸。动漫3D风格,超写实油画,超高分辨率,最好的质量,8k'
+    prompt = "裸体女孩"
 
     model = "doubao-seedream-3-0-t2i-250415"
     model = "black-forest-labs/FLUX.1-Krea-dev"
     model = "Qwen/Qwen-Image"
     model = "MusePublic/FLUX.1-Kontext-Dev"
-    model = "doubao-seedream-4-0-250828"
+    model = "doubao-seedream-4-0-250828"  # https://ark.cn-beijing.volces.com/api/v3/chat/completions
     # model = "DiffSynth-Studio/FLUX.1-Kontext-dev-lora-SuperOutpainting"
     # model = "DiffSynth-Studio/FLUX.1-Kontext-dev-lora-highresfix"
     # # model = "black-forest-labs/FLUX.1-Kontext-dev"
     # model="DiffSynth-Studio/FLUX.1-Kontext-dev-lora-ArtAug"
 
+    model = "gemini-2.5-flash-image-preview"
+
     # flux-kontext-dev
 
     response = client.images.generate(
@@ -259,3 +266,6 @@ with timer('image'):
     # openai.APIStatusError: Error code: 402 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details', 'type': 'quota_exceeded'}}
 
 
+
+    client.images.edit()
+    client.images.generate()
examples/_openaisdk/openai_router.py
CHANGED
@@ -17,8 +17,8 @@ from meutils.str_utils import parse_base64
 # gets API Key from environment variable OPENAI_API_KEY
 client = OpenAI(
     # base_url="https://openrouter.ai/api/v1",
-    base_url="https://all.chatfire.cn/openrouter/v1",
-    api_key=os.getenv("OPENROUTER_API_KEY"),
+    # base_url="https://all.chatfire.cn/openrouter/v1",
+    # api_key=os.getenv("OPENROUTER_API_KEY"),
     #
     # base_url="http://38.46.219.252:9001/v1",
     #
@@ -28,6 +28,9 @@ client = OpenAI(
     # api_key = "sk-qOpbMHesasoVgX75ZoeEeBEf1R9dmsUZVAPcu5KkvLFhElrn"
     # api_key="sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M",
     # base_url="https://new.yunai.link/v1"
+
+    base_url="https://api.pisces.ink/v1",
+    api_key="pisces-03dedafafcbf4c1b9c87530858510932"
 )
 
 # (content=' \n'
@@ -40,17 +43,18 @@ completion = client.chat.completions.create(
     # model="openai/o1",
     # model="deepseek/deepseek-r1-0528-qwen3-8b:free",
     # model="google/gemini-2.5-flash-image-preview:free",
-    model="deepseek/deepseek-chat-v3.1:free",
+    # model="deepseek/deepseek-chat-v3.1:free",
     # model="gemini-2.0-flash-exp-image-generation",
+    model="gemini-2.5-flash-image-preview",
     max_tokens=10,
-    extra_body={"reasoning_stream": True},
+    # extra_body={"reasoning_stream": True},
     messages=[
         {
             "role": "user",
             "content": [
                 {
                     "type": "text",
-                    "text": "
+                    "text": "画条狗"
                 },
                 # {
                 #     "type": "image_url",
@@ -66,12 +70,10 @@ print(completion.choices[0].message.content)
 # arun(to_url(completion.choices[0].message.images[0]['image_url']['url'], content_type="image/png"))
 
 
-
 b64_list = parse_base64(completion.choices[0].message.content)
 
 arun(to_url(b64_list, content_type="image/png"))
 
-
 # '好的,旁边加一只戴墨镜的狗。\n\n)
 
 
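Note: the router example now requests gemini-2.5-flash-image-preview, then extracts base64 images from the markdown reply with parse_base64 and re-uploads them via to_url. A self-contained stand-in for that extraction step (the regex and helper name below are assumptions; meutils' own parse_base64 is not shown in this diff):

import re
from typing import List

def extract_base64_images(markdown: str) -> List[str]:
    # Pull data-URI payloads out of markdown image tags: ![alt](data:image/...;base64,...)
    return re.findall(r'!\[.*?\]\((data:image/[^)]+)\)', markdown)

reply = "好的 ![dog](data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==)"
print(extract_base64_images(reply))  # ['data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==']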
meutils/apis/chatglm/glm_video_api.py
CHANGED
@@ -38,7 +38,7 @@ async def create_task(request: VideoRequest, api_key: Optional[str] = None):
     proxy = await get_one_proxy()
     client = ZhipuAI(
         api_key=api_key,
-        http_client=httpx.Client(proxy=proxy)
+        # http_client=httpx.Client(proxy=proxy)
 
     )  # 请填写您自己的APIKey
     response = client.videos.generations(
@@ -109,7 +109,7 @@ async def generate(request: ImageRequest, n: int = 30):  # 兼容dalle3
 
 # VideoResult
 if __name__ == '__main__':
-    api_key = "
+    api_key = "47e81002bdb748f7a6e59c1e3ab2bf5d.UQ0GcNPerKv0H5zY"
 
     # api_key = "c98aa404b0224690b211c5d1e420db2c.qGaByuJATne08QUx"
     # api_key = "7d10426c06afa81e8d7401d97781249c.DbqlSsicRtaUdKXI"  # 新号
meutils/apis/fal/images.py
CHANGED
@@ -259,7 +259,7 @@ if __name__ == '__main__':
 
     #
     prompt = "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp Put the little duckling on top of the woman's t-shirt."
-
+    # prompt = '把小鸭子放在女人的T恤上面。\nhttps://s3.ffire.cc/cdn/20250530/tEzZKkhp3tKbNzva6mgC2T\nhttps://s3.ffire.cc/cdn/20250530/AwHJpuJuNg5w3sVbH4PZdv'
     request = ImageRequest(prompt=prompt, model=model)
 
     data = {
meutils/apis/images/generations.py
CHANGED
@@ -7,10 +7,12 @@
 # @WeChat : meutils
 # @Software : PyCharm
 # @Description : 统一收口
+import os
 
 from meutils.pipe import *
 from meutils.llm.clients import AsyncClient
 from meutils.llm.openai_utils import to_openai_params
+from meutils.io.files_utils import to_png
 
 from meutils.schemas.image_types import ImageRequest, RecraftImageRequest
 
@@ -33,7 +35,8 @@ async def generate(
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
 ):
-
+    if len(str(request)) < 1024:
+        logger.debug(request)
 
     if request.model.startswith("fal-ai"):  # 主要 request.image
         return await fal_generate(request, api_key)
@@ -74,10 +77,24 @@ async def generate(
     request = ImageRequest(**data)
     if request.model.startswith("doubao"):
         request.watermark = False
-        if request.
+        if request.model.startswith("doubao-seedream-4"):
+            if not any(i in str(request.image) for i in {".png", ".jpeg", "image/png", "image/jpeg"}):
+                logger.debug(f"{request.model}: image 不是 png 或 jpeg 格式,转换为 png 格式")
+                request.image = await to_png(request.image, response_format='b64')
+
+            if request.n > 1:
+                request.sequential_image_generation = "auto"
+                request.sequential_image_generation_options = {
+                    "max_images": request.n
+                }
+        elif request.image and isinstance(request.image, list):
             request.image = request.image[0]
 
     data = to_openai_params(request)
+
+    if len(str(data)) < 1024:
+        logger.debug(bjson(data))
+
     client = AsyncClient(api_key=api_key, base_url=base_url)
     return await client.images.generate(**data)
 
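Note: the net effect of the new branch is that a doubao-seedream-4 request with n > 1 is rewritten into the Ark sequential-image-generation form before being forwarded. A self-contained sketch of just that rewrite (a plain dict stands in for the ImageRequest / to_openai_params plumbing, which this diff does not show):

from typing import Any, Dict

def expand_seedream_request(payload: Dict[str, Any]) -> Dict[str, Any]:
    # Mirror the branch above: n > 1 becomes an "auto" sequential-generation request.
    if payload.get("model", "").startswith("doubao-seedream-4") and payload.get("n", 1) > 1:
        payload["sequential_image_generation"] = "auto"
        payload["sequential_image_generation_options"] = {"max_images": payload["n"]}
    return payload

print(expand_seedream_request({"model": "doubao-seedream-4-0-250828", "prompt": "组图", "n": 3}))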
@@ -89,4 +106,30 @@ if __name__ == '__main__':
     # arun(generate(ImageRequest(model="FLUX_1-Krea-dev", prompt="笑起来")))
 
     token = f"""{os.getenv("VOLC_ACCESSKEY")}|{os.getenv("VOLC_SECRETKEY")}"""
-    arun(generate(ImageRequest(model="seed", prompt="笑起来"), api_key=token))
+    # arun(generate(ImageRequest(model="seed", prompt="笑起来"), api_key=token))
+
+    request = ImageRequest(model="doubao-seedream-4-0-250828", prompt="笑起来",
+                           image="https://s3.ffire.cc/cdn/20250909/jJnBunxbr2Topzqnu6rKrc_None")
+
+    request = ImageRequest(
+        model="doubao-seedream-4-0-250828",
+        prompt="将小鸭子放在t恤上",
+        image=[
+            "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp",
+            "https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp"
+        ]
+    )
+
+    # todo: tokens 4096 1张
+
+    # 组图
+    request = ImageRequest(
+        model="doubao-seedream-4-0-250828",
+        prompt="参考这个LOGO,做一套户外运动品牌视觉设计,品牌名称为GREEN,包括包装袋、帽子、纸盒、手环、挂绳等。绿色视觉主色调,趣味、简约现代风格",
+        image="https://ark-project.tos-cn-beijing.volces.com/doc_image/seedream4_imageToimages.png",
+        n=3
+    )
+
+    # arun(generate(request, api_key=os.getenv("FFIRE_API_KEY"), base_url=os.getenv("FFIRE_BASE_URL")))  # +"-29494"
+
+    print(not any(i in str(request.image) for i in {".png", ".jpeg", "image/png", "image/jpeg"}))
meutils/apis/volcengine_apis/videos.py
CHANGED
@@ -35,7 +35,7 @@ async def get_valid_token(tokens: Optional[list] = None, batch_size: Optional[in
     5. 校验通过则返回token
     """
    tokens = tokens or await get_series(FEISHU_URL, duplicated=True)
-    batch_size =
+    batch_size = batch_size or 1
 
     if seed == 0 and (volc_tokens := await redis_aclient.get(f"volc_tokens")):
         return volc_tokens.decode()
@@ -48,6 +48,7 @@ async def get_valid_token(tokens: Optional[list] = None, batch_size: Optional[in
     if len(valid_tokens) == batch_size:
         _ = '\n'.join(valid_tokens)
         await redis_aclient.set(f"volc_tokens", _, ex=2 * 3600)
+
         return _
 
 
@@ -266,7 +267,7 @@ c4356b58-4aa3-4a52-b907-b40c4dd2e502
     # arun(get_task_from_feishu(ids, ))
 
     # arun(get_valid_token(['a93ea9a5-3831-47b8-863a-57e10233f922']))
-    arun(get_valid_token(
+    arun(get_valid_token(random_choice=True))
 
     """
     {'id': 'cgt-20250613160030-2dvd7',
meutils/data/VERSION
CHANGED
@@ -1 +1 @@
-2025.09.
+2025.09.11.12.22.01
meutils/io/files_utils.py
CHANGED
@@ -21,6 +21,7 @@ from meutils.oss.minio_oss import Minio
 from starlette.datastructures import UploadFile
 from contextlib import asynccontextmanager
 from httpx import AsyncClient
+from PIL import Image
 
 
 def guess_mime_type(file):
@@ -154,7 +155,7 @@ async def to_url(
         content_type: Optional[str] = None,
         mime_type: Optional[str] = None,
 
-):
+):  # 传入 url 是否 转存
     if not file: return
 
     content_type = content_type or mime_type
@@ -260,6 +261,27 @@ async def get_file_duration(filename: str = ".mp4", url: Optional[str] = None, c
     return int(np.ceil(tag.duration or 10))
 
 
+async def to_png(image: Union[bytes, List[bytes], str, List[str]], response_format: str = 'bytes') -> bytes:
+    """
+    将 WebP 二进制数据无损转换为 PNG 二进制数据
+    """
+    if isinstance(image, list):
+        tasks = [to_png(_, response_format) for _ in image]
+        return await asyncio.gather(*tasks)
+
+    if isinstance(image, str):
+        image = await to_bytes(image)
+
+    with Image.open(io.BytesIO(image)) as im:
+        out = io.BytesIO()
+        im.save(out, format="PNG")  # 无损
+
+        if response_format != "bytes":
+            return await to_base64(out.getvalue(), content_type="image/png")
+
+        return out.getvalue()
+
+
 if __name__ == '__main__':
     # import tempfile
     #
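Note: a usage sketch of the new to_png helper (signature taken from the hunk above; assumes Pillow is installed, an image file exists at the given path, and that response_format='b64' returns a base64 string via to_base64):

import asyncio
from meutils.io.files_utils import to_png

async def main():
    webp_bytes = open("image1.webp", "rb").read()               # any Pillow-readable input
    png_bytes = await to_png(webp_bytes)                        # default: lossless PNG bytes
    png_b64 = await to_png(webp_bytes, response_format="b64")   # base64 form, used for doubao-seedream-4 images
    print(len(png_bytes), str(png_b64)[:40])

asyncio.run(main())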
@@ -342,12 +364,16 @@ if __name__ == '__main__':
     # arun(get_file_duration(url=url))
 
     # r = arun(to_url([]))
-    text = "这是一个示例文本,包含一个图片: 这张图片很棒。"
+    # text = "这是一个示例文本,包含一个图片: 这张图片很棒。"
+    #
+    # arun(markdown_base64_to_url(
+    #     text=text,
+    #     # pattern=r'!\[.*?\]\((data:image/.*?)\)'
+    #     # pattern=r'!\[.*?\]\((.*?)\)'
+    #
+    # )
+    # )
 
-
-        text=text,
-        # pattern=r'!\[.*?\]\((data:image/.*?)\)'
-        # pattern=r'!\[.*?\]\((.*?)\)'
+    webp_bytes = Path("/Users/betterme/PycharmProjects/AI/MeUtils/meutils/apis/images/image1.webp").read_bytes()
 
-    )
-    )
+    arun(to_png(webp_bytes, response_format='b64'))
meutils/llm/check_utils.py
CHANGED
@@ -214,7 +214,7 @@ async def check_token_for_sophnet(api_key, threshold: float = 1):
 
 #
 @retrying()
-
+@rcache(ttl=7 * 24 * 3600, skip_cache_func=skip_cache_func)
 async def check_token_for_volc(api_key, threshold: float = 1, purpose: Optional[str] = None):
     if not isinstance(api_key, str):
         return await check_tokens(
@@ -262,7 +262,6 @@ async def check_token_for_volc(api_key, threshold: float = 1, purpose: Optional[
             response_format="url"
         )
         logger.debug(response.json())
-        response.raise_for_status()
 
     else:
 
meutils/llm/completions/qwenllm.py
CHANGED
@@ -45,7 +45,7 @@ thinking_budget_mapping = {
 }
 
 COOKIE = """
-
+_gcl_au=1.1.1093269050.1756349377;xlly_s=1;_bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb;cnaui=310cbdaf-3754-461c-a3ff-9ec8005329c9;isg=BCQknTSrEFaWtGtm_x0nSvW89SQWvUgnEt_awT5Fz--w6cezYs52t7gDqUFxMYB_;ssxmod_itna2=1-iqGhD50IThkG8Dhx_xmuxWKUt_EoG7DzxC5KY0CDmxjKidDRDB40QRTnf_Ti=qaeGrMwxrDPxD3r5iY80q7DFg0WeDBk4uAn8mY3vKFTl7S9o7EaoSeXnAOPBSok57ccC4rhgutjg2_8D7jk_lChKSSdfMbm2lKAUlRwIqjSxeNld4tMGxFev6zkUel_6LR_foIGQa5L4PuCGa6dUqvwISCODQh2TC6wQ1Hu=Ll=W4=W6s1E_V8Dr_1gDECghksL8zvQHiPI60ChnPodSvFnHjE2iXzGDdE_I5876eQ03cEzaFsA48KQLReNjiPp1I0EfN=5a=dziPIxXxcpup5zmGM2L48PYjAqiUxrwVQDY4vN=Ni_pu6pxOYEBNL7YA6RPcRy7Ak=Y5PPbZRAi242ulCDx3oZCGH2YE6p3lD4gPIOKMWX6AbmhYLrcMeXcYdjzGgaYEDq4DUtKP/1jf=vXt=MQXoZ23BWlE5h06cjceY_Bxw3AH3KeBaxT4pHEt19QSlaO20G9DfDq7Wf3BvV=5X/BYd54Y44nUHOfH_fV2mHKNz0W4lxjWDjY9H3m00I3cOIf3C6q7Y_CNx1sG1nwsiDUA34QDtsC8jw1YD;sca=aefac4ee;acw_tc=0a03e54a17574902359765651e1e9cf9780667ec2a2cefacce63a0b954bb63;atpsida=7e2e4dcfcd1c4a4530dd3395_1757490275_2;aui=310cbdaf-3754-461c-a3ff-9ec8005329c9;cna=KP9DIEqqyjUCATrw/+LjJV8F;ssxmod_itna=1-iqGhD50IThkG8Dhx_xmuxWKUt_EoG7DzxC5KY0CDmxjKidDRDB40QRTnf_Ti=qaeGrMwxrDyxGXoNexiNDAg40iDCbmLx5Yjdrq4NFtrojKaIjL4Q43rj9_8m0tnY/qmUTMU6Rljs7s66tqGI_DG2DYoDCqDS0DD99OdD4_3Dt4DIDAYDDxDWIeDB4ZrQDGPtLQut5eqKwBrtxi3QWWPQDiUCxivW56Wt5C_GwDDliPGfWepi20iDbqDuzKqaBeDLRPpvxB=PxYPmjmkUxBQGHniO/WWb2BkrGOGBKim6mTeM0O_qBGGhfyDGxNI0eYT44YxS4VQYQjytGBFDDWgL4_AzKl_TYx_7CIq13b1_BDCgtIQi_GK=DKoBddAQ2wmlBx/DYfjGeAzNA4aDhLlxtlzaiDD;tfstk=geyx-Dg4-gKxmZY2DPflIb1e9VjuX_qVPrrBIV0D1zU85rs2IlGD5dU-rirmhZz8VyUp5diXCbw8-cTbnog6NdUtyi8XG1y-XPUwhK1V7utSbmnbWlSqfVa_WVVcZ9q40Ak1-lBhKo8RmR1gkd9XVGitjCsoG1PJiYM1-wXoYFbWuAaGVPsqNugrXctjCPOWFD0ScEOb54GSADYjCAa12YiZcKi6GfZ5Fm0S5AM_5us-j4ijCAa_VgnZs7QtPngMBSZPyLNC_OpMI8nxMVpikdIljmcxRogRRwwJ3j3Qc4p18hgjf4iLFN8m78E7hD45EKHTVcUKh7QWHPEbTPoQlTLjDSZLxbeNRdnzNrloD7I6NqZIlrhY9Usx7-4YBjyC-Lubw8ytn7bvKVl_Z-lUna9xdWrmn5a5fdgTNcsPDJ2LcuTnJcAf2gdw_jirJjsq62sn5Go--iB9_ClP403h2wOw_jir22jA-CRZag5..;token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgwOTUwMzd9.JdVvyPkln2HcGm6ib0FKaF1qQ87lG1nf70oezhYZ2Jg;x-ap=cn-hongkong
 """.strip()
 
 
@@ -396,7 +396,7 @@ if __name__ == '__main__':
     )
     token = None
 
-    token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.
+    token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgwOTUwMzd9.JdVvyPkln2HcGm6ib0FKaF1qQ87lG1nf70oezhYZ2Jg"
 
     arun(create(request, token))
 
meutils/llm/models/siliconflow.py
CHANGED
@@ -31,6 +31,9 @@ models_mapping = {
     "kimi-k2-250711": "moonshotai/Kimi-K2-Instruct",
     "kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct",
 
+    "kimi-k2-250905": "moonshotai/Kimi-K2-Instruct-0905",
+    "kimi-k2-0905-preview": "moonshotai/Kimi-K2-Instruct-0905",
+
     "qwen2.5-vl-32b-instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
     "qwen2.5-vl-72b-instruct": "Qwen/Qwen2.5-VL-72B-Instruct",
     "qwen2.5-vl-7b-instruct": "Qwen/Qwen2.5-VL-7B-Instruct",
@@ -63,6 +66,7 @@ def get_models_mapping():
     models = client.models.list().data
     models = {
         m.id.removeprefix("Pro/").split('/', maxsplit=1)[-1].lower(): m.id.removeprefix("Pro/") for m in models
+        if any(i not in m.id.lower() for i in {"stable-diffusion"})
     }
     return {**models, **models_mapping}
 
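Note: the mapping key is derived by stripping the "Pro/" prefix, keeping the segment after the vendor name, and lower-casing it; the new filter drops ids containing "stable-diffusion" (with a single-element set, any(i not in ...) is just a plain not-in test). A standalone sketch of that derivation with illustrative ids:

ids = [
    "Pro/moonshotai/Kimi-K2-Instruct-0905",
    "Qwen/Qwen2.5-VL-72B-Instruct",
    "stabilityai/stable-diffusion-3-5-large",
]
mapping = {
    i.removeprefix("Pro/").split("/", maxsplit=1)[-1].lower(): i.removeprefix("Pro/")
    for i in ids
    if any(s not in i.lower() for s in {"stable-diffusion"})
}
print(mapping)
# {'kimi-k2-instruct-0905': 'moonshotai/Kimi-K2-Instruct-0905',
#  'qwen2.5-vl-72b-instruct': 'Qwen/Qwen2.5-VL-72B-Instruct'}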
meutils/llm/openai_utils/adapters.py
CHANGED
@@ -7,6 +7,7 @@
 # @WeChat : meutils
 # @Software : PyCharm
 # @Description :
+import shortuuid
 from aiostream import stream
 
 from meutils.pipe import *
@@ -39,8 +40,12 @@ async def chat_for_image(
     if request.model.startswith('fal'):
         urls = await to_url_fal(image_urls, content_type="image/png")  # 国外友好
         image = urls
+
+    elif request.model.startswith("doubao-seed"):
+        image = image_urls  # b64
+
     else:
-        urls = await to_url(image_urls, content_type="image/png")  # 数组
+        urls = await to_url(image_urls, ".png", content_type="image/png")  # 数组
         image = urls
 
     image_request = ImageRequest(
@@ -91,7 +96,11 @@ async def chat_for_image(
     future_task = asyncio.create_task(generate(image_request))  # 异步执行
 
     async def gen():
-
+        exclude = None
+        if len(str(image_request.image)) > 1000:
+            exclude = {"image"}
+
+        text = image_request.model_dump_json(exclude_none=True, exclude=exclude).replace("free", "")
         for i in f"""> 🖌️正在绘画\n\n```json\n{text}\n```\n\n""":
             await asyncio.sleep(0.05)
             yield i
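Note: the new exclude logic keeps the streamed "正在绘画" JSON preview small when the request carries a large base64 image. A self-contained sketch of that step (a minimal Pydantic model stands in for meutils' ImageRequest):

from typing import List, Optional, Union
from pydantic import BaseModel

class ImageRequest(BaseModel):
    model: str = ""
    prompt: str = " "
    image: Optional[Union[str, List[str]]] = None

req = ImageRequest(model="doubao-seedream-4-0-250828", prompt="画条狗",
                   image="data:image/png;base64," + "A" * 2000)

exclude = {"image"} if len(str(req.image)) > 1000 else None
text = req.model_dump_json(exclude_none=True, exclude=exclude)
print(text)  # the oversized image field is omitted from the preview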
@@ -155,6 +164,7 @@ async def chat_for_video(
 
 if __name__ == '__main__':
     from meutils.apis.images.generations import generate
+
     request = CompletionRequest(
         model="deepseek-r1-Distill-Qwen-1.5B",
         messages=[
meutils/office_automation/pdf.py
CHANGED
@@ -93,9 +93,10 @@ def extract_images_from_pdf(file, output: Optional[str] = None):
 
 if __name__ == '__main__':
     with timer():
-        # r = extract_text('
+        # r = extract_text('上海证券交易所证券交易业务指南第8号——科创板股票做市(上证函〔2022〕1155号 20220715)-1757338961901 (1).pdf')
+        r = extract_text('非上市公司股权估值指引(2025年修订 中证协发〔2025〕86号 20250425 20250601)-1757078360106.pdf')
 
-        r = extract_images_from_pdf('《锋利的jQuery》(高清扫描版-有书签)_副本_加水印.pdf', 'images')
+        # r = extract_images_from_pdf('《锋利的jQuery》(高清扫描版-有书签)_副本_加水印.pdf', 'images')
 
     # import tiktoken
     # print(tiktoken.encoding_for_model('gpt-3.5-turbo'))
meutils/schemas/image_types.py
CHANGED
@@ -84,22 +84,34 @@ class ImagesResponse(_ImagesResponse):
 class ImageRequest(BaseModel):  # openai
     """
     图生图 两种方式: prompt + controls
+
+    background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+    output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+    output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+    partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+
     """
     model: str = ''
 
     prompt: constr(min_length=1, max_length=10240) = " "
 
-
+    moderation: Optional[Literal["low", "auto"]] = None
 
-
-
+    style: Optional[Union[str, Literal["vivid", "natural"]]] = None
+    background: Optional[Union[str, Literal["transparent", "opaque", "auto"]]] = None
+    input_fidelity: Optional[Union[str, Literal["high", "low"]]] = None
+    quality: Optional[Union[str, Literal["standard", "low", "medium", "high", "auto"]]] = None
 
     # 测试默认值 Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
+    n: Optional[int] = 1
     size: Optional[str] = '1024x1024'  # null auto
 
+    output_format: Optional[Literal["png", "jpeg", "webp"]] = None
     response_format: Optional[Literal["url", "b64_json", "oss_url", "glb", "stl"]] = "url"
 
     seed: Optional[int] = None
+    stream: Optional[bool] = None
 
     # oneapi https://github.com/QuantumNous/new-api/blob/main/dto/dalle.go
     extra_fields: Optional[Any] = None  ###
@@ -114,13 +126,18 @@ class ImageRequest(BaseModel):  # openai
 
     aspect_ratio: Optional[str] = None
 
-    user: Optional[str] = None
+    user: Optional[str] = None
 
     image: Optional[Union[str, List[str]]] = None  # url b64
     watermark: Optional[bool] = None
 
     def __init__(self, /, **data: Any):
         super().__init__(**data)
+        # 规避空字符
+        self.style = self.style or None
+        self.quality = self.quality or None
+        self.background = self.background or None
+        self.input_fidelity = self.input_fidelity or None
 
         if self.aspect_ratio:  # 适配比例
             self.size = ASPECT_RATIOS.get(self.aspect_ratio, '1024x1024')
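Note: the added __init__ lines normalise empty strings (e.g. quality="") to None so they are not forwarded upstream. A reduced sketch of that coercion, covering only a subset of the real model's fields:

from typing import Any, Optional
from pydantic import BaseModel

class ImageRequest(BaseModel):
    model: str = ""
    prompt: str = " "
    style: Optional[str] = None
    quality: Optional[str] = None
    background: Optional[str] = None
    input_fidelity: Optional[str] = None

    def __init__(self, /, **data: Any):
        super().__init__(**data)
        # 规避空字符: empty strings become None
        self.style = self.style or None
        self.quality = self.quality or None
        self.background = self.background or None
        self.input_fidelity = self.input_fidelity or None

print(ImageRequest(prompt="a dog", quality="", style="").model_dump(exclude_none=True))
# {'model': '', 'prompt': 'a dog'}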
@@ -187,24 +204,41 @@ class ImageRequest(BaseModel):  # openai
 class ImageEditRequest(BaseModel):
     model: Union[str, Literal["dall-e-2", "dall-e-3", "gpt-image-1"]]
 
+    # image: Union[FileTypes, SequenceNotStr[FileTypes]],
+    # prompt: str,
+    # mask: FileTypes | NotGiven = NOT_GIVEN,
+
     prompt: str
     image: Any  # 图片
 
     mask: Optional[Any] = None  # 图片
-
+
+    background: Optional[Union[str, Literal["transparent", "opaque", "auto"]]] = None
+    input_fidelity: Optional[Union[str, Literal["high", "low"]]] = None
+    quality: Optional[Union[str, Literal["standard", "low", "medium", "high", "auto"]]] = None
 
     n: Optional[int] = 1
-    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] = None
     size: Optional[
         Union[str, Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]] = "1024x1024"
-    response_format: Optional[Literal["url", "b64_json"]] = None
 
     aspect_ratio: Optional[str] = None
 
     user: Optional[str] = None
+    stream: Optional[bool] = None
+
+    response_format: Optional[Literal["url", "b64_json"]] = None
+    output_format: Optional[Literal["png", "jpeg", "webp"]] = None
+
+    # output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+    # partial_images: Optional[int] | NotGiven = NOT_GIVEN,
 
     def __init__(self, /, **data: Any):
         super().__init__(**data)
+        # 规避空字符
+        self.quality = self.quality or None
+        self.background = self.background or None
+        self.input_fidelity = self.input_fidelity or None
+
         if not isinstance(self.image, list):
             self.image = [self.image]
 
@@ -622,6 +656,17 @@ if __name__ == '__main__':
 
         aspect_ratio="16:9"
     )
+
+    # "sequential_image_generation": "auto",
+    # "sequential_image_generation_options": {
+    #     "max_images": 4
+    # },
+
+    request.sequential_image_generation = "auto"
+
+    request.sequential_image_generation_options = {
+        "max_images": 4
+    }
     #
     # print(request.prompt)
     #
meutils/schemas/oneapi/common.py
CHANGED
@@ -621,6 +621,9 @@ MODEL_RATIO = {
     "doubao-embedding-large-text-250515": 0.25,
     "doubao-embedding-text-240715": 0.35,
 
+    "seed-oss-36b-instruct": 0.75,
+    "ling-mini-2.0": 0.25,
+
     # 百川
     'baichuan4-turbo': 7.5,
     'baichuan4-air': 0.49,
@@ -665,12 +668,15 @@ MODEL_RATIO = {
     "kimi": 5,
     "kimi-128k": 5,
     "kimi-dev-72b": 1,
-    "moonshotai/kimi-k2-instruct": 2,
-    "kimi-k2-0711-preview": 2,
     "kimi-k2-turbo-preview": 2,
+
     "kimi-k2-250711": 2,
+    "kimi-k2-0711-preview": 2,
+    "kimi-k2-instruct-0711": 2,
+
     "kimi-k2-250905": 2,
     "kimi-k2-0905-preview": 2,
+    "kimi-k2-instruct-0905": 2,
 
     # 智谱 https://www.bigmodel.cn/pricing
     'glm-4-9b-chat': 0.1,
@@ -1260,12 +1266,15 @@ COMPLETION_RATIO = {
     "kimi-latest-128k": 3,
     "kimi-dev-72b": 4,
     "kimi-vl-a3b-thinking": 5,
-    "moonshotai/kimi-k2-instruct": 4,
-    "kimi-k2-0711-preview": 4,
     "kimi-k2-turbo-preview": 4,
+
     "kimi-k2-250711": 4,
+    "kimi-k2-0711-preview": 4,
+    "kimi-k2-instruct-0711": 4,
+
     "kimi-k2-250905": 4,
     "kimi-k2-0905-preview": 4,
+    "kimi-k2-instruct-0905": 4,
 
     "moonshot-v1-8k": 5,
     "moonshot-v1-32k": 4,
@@ -1435,7 +1444,7 @@ COMPLETION_RATIO = {
     "gemini-2.5-pro-think": 8,
 
     "gemini-2.5-pro-thinking": 8,
-    "gemini-2.5-pro-exp-03-25-thinking":8,
+    "gemini-2.5-pro-exp-03-25-thinking": 8,
     "gemini-2.5-pro-preview-03-25-thinking": 8,
     "gemini-2.5-pro-nothinking": 8,
 
@@ -1450,7 +1459,6 @@ COMPLETION_RATIO = {
     "gemini-2.5-flash-preview-05-20": 8.4,
     "gemini-2.5-flash-preview-05-20-nothinking": 8.4,
 
-
     "hunyuan-a52b-instruct": 5,
     "qwen2.5-coder-32b-instruct": 3,
 
@@ -1571,6 +1579,9 @@ COMPLETION_RATIO = {
 
     "doubao-1-5-thinking-vision-pro-250428": 3,
 
+    "seed-oss-36b-instruct": 4,
+    "ling-mini-2.0": 4,
+
     "deepseek-prover-v2-671b": 4,
     "deepseek-r1:1.5b": 4,
     "deepseek-r1-distill-qwen-1.5b": 4,
@@ -1662,7 +1673,9 @@ COMPLETION_RATIO = {
     "meta-llama/Llama-3.2-11B-Vision-Instruct": 4,
 
     "mistral-small-3.1-24b-instruct": 0.1,
-    "mistral-small-24b-instruct-2501": 3
+    "mistral-small-24b-instruct-2501": 3,
+
+
 
 }
 
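Note: how these ratios are consumed is not defined in this diff; the sketch below assumes the one-api/new-api convention that a MODEL_RATIO of 1 corresponds to $0.002 per 1K prompt tokens and that COMPLETION_RATIO multiplies the prompt price:

MODEL_RATIO = {"kimi-k2-instruct-0905": 2}
COMPLETION_RATIO = {"kimi-k2-instruct-0905": 4}

model = "kimi-k2-instruct-0905"
prompt_price_per_1k = MODEL_RATIO[model] * 0.002                        # assumed base price
completion_price_per_1k = prompt_price_per_1k * COMPLETION_RATIO[model]
print(prompt_price_per_1k, completion_price_per_1k)                     # 0.004 0.016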