MeUtils 2025.8.27.12.16.3__py3-none-any.whl → 2025.8.29.15.13.52__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/_openaisdk/openai_router.py +76 -0
- examples/arq_demo/异步.py +32 -0
- meutils/apis/chatglm/glm_video_api.py +3 -3
- meutils/apis/fal/images.py +1 -0
- meutils/apis/google/chat.py +87 -43
- meutils/apis/google/images.py +99 -7
- meutils/apis/images/__init__.py +0 -1
- meutils/apis/images/generations.py +31 -20
- meutils/apis/volcengine_apis/images.py +11 -12
- meutils/apis/volcengine_apis/videos.py +2 -3
- meutils/data/VERSION +1 -1
- meutils/io/files_utils.py +29 -8
- meutils/llm/check_utils.py +1 -1
- meutils/llm/models/openrouter.py +68 -0
- meutils/llm/openai_utils/adapters.py +17 -6
- meutils/schemas/image_types.py +33 -21
- meutils/schemas/oneapi/common.py +14 -6
- {meutils-2025.8.27.12.16.3.dist-info → meutils-2025.8.29.15.13.52.dist-info}/METADATA +264 -264
- {meutils-2025.8.27.12.16.3.dist-info → meutils-2025.8.29.15.13.52.dist-info}/RECORD +23 -21
- examples/_openaisdk/open_router.py +0 -49
- {meutils-2025.8.27.12.16.3.dist-info → meutils-2025.8.29.15.13.52.dist-info}/WHEEL +0 -0
- {meutils-2025.8.27.12.16.3.dist-info → meutils-2025.8.29.15.13.52.dist-info}/entry_points.txt +0 -0
- {meutils-2025.8.27.12.16.3.dist-info → meutils-2025.8.29.15.13.52.dist-info}/licenses/LICENSE +0 -0
- {meutils-2025.8.27.12.16.3.dist-info → meutils-2025.8.29.15.13.52.dist-info}/top_level.txt +0 -0
meutils/io/files_utils.py
CHANGED
```diff
@@ -152,8 +152,11 @@ async def to_url(
         headers: Optional[dict] = None,

         content_type: Optional[str] = None,
-        mime_type: Optional[str] = None
+        mime_type: Optional[str] = None,
+
 ):
+    if not file: return
+
     content_type = content_type or mime_type

     if isinstance(file, list):
@@ -161,13 +164,29 @@ async def to_url(
         urls = await asyncio.gather(*tasks)
         return urls

-    if not file: return
-
     file = await to_bytes(file, headers=headers)
     file_url = await Minio().upload(file, filename, content_type=content_type)
     return file_url


+"""
+request = ImageProcess(
+    # model="hunyuan-remove-watermark",
+
+    # model="remove-watermark",
+    model="clarity",
+    # model="expand",
+    # model="rmbg-2.0",
+
+    image=url,
+    # mask=url,
+
+    # response_format="b64_json"
+)
+arun(edit_image(request))
+"""
+
+
 async def to_base64(file: Union[UploadFile, str, bytes, list], content_type: Optional[str] = None):
     if isinstance(file, list):
         tasks = [to_base64(_, content_type) for _ in file]
@@ -327,9 +346,11 @@ if __name__ == '__main__':
     # url = "https://lmdbk.com/5.mp4"
     # url = "https://v3.fal.media/files/kangaroo/y5-1YTGpun17eSeggZMzX_video-1733468228.mp4"
     # content = requests.get(url).content
+    #
+    # url = "https://fal.media/files/koala/8teUPbRRMtAUTORDvqy0l.mp4"
+    #
+    # with timer():
+    #     # arun(get_file_duration(content=content))
+    #     arun(get_file_duration(url=url))

-
-
-    with timer():
-        # arun(get_file_duration(content=content))
-        arun(get_file_duration(url=url))
+    r = arun(to_url([]))
```
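The guard `if not file: return` moves from the single-file branch to the top of `to_url`, so empty input (including an empty list, which the new `arun(to_url([]))` line exercises) now short-circuits before any Minio upload; `mime_type` also gains a trailing comma so further keyword arguments can follow. Below is a minimal sketch of that early-return shape under stated assumptions: `upload` is a hypothetical stand-in for the Minio call, not the package's implementation.

```python
from typing import Optional, Union


async def upload(data: bytes, filename: Optional[str], content_type: Optional[str]) -> str:
    # Hypothetical stand-in for Minio().upload(...) used in files_utils.py.
    return f"https://example.invalid/{filename or 'blob'}"


async def to_url_sketch(
        file: Union[str, bytes, list, None],
        filename: Optional[str] = None,
        content_type: Optional[str] = None,
        mime_type: Optional[str] = None,
):
    # Guard first: None, b"", "" and [] all return before any upload work,
    # mirroring the `if not file: return` moved to the top of to_url in this diff.
    if not file:
        return

    content_type = content_type or mime_type

    if isinstance(file, list):
        # Recurse per element; an empty list already returned above.
        return [await to_url_sketch(f, filename, content_type) for f in file]

    data = file.encode() if isinstance(file, str) else file
    return await upload(data, filename, content_type)


if __name__ == "__main__":
    import asyncio

    assert asyncio.run(to_url_sketch([])) is None  # empty input: no upload attempted
    print(asyncio.run(to_url_sketch(b"hello", "hello.txt", content_type="text/plain")))
```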
meutils/llm/check_utils.py
CHANGED
```diff
@@ -316,7 +316,7 @@ async def check_token_for_zhipu(api_key, threshold: float = 1, resource_package_


 @retrying()
-@rcache(ttl=1 * 24 * 3600, skip_cache_func=skip_cache_func)
+# @rcache(ttl=1 * 24 * 3600, skip_cache_func=skip_cache_func)
 async def check_token_for_fal(token, threshold: float = 0):
     try:
         # data = await AsyncClient(key=token).upload(b'', '', '')
```
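A hedged illustration of what dropping `@rcache` changes: with the decorator, a result was reused for up to 24 hours unless `skip_cache_func` rejected it; without it, `check_token_for_fal` re-checks the token on every call. The toy decorator below is only an approximation of that behaviour, not the package's rcache implementation.

```python
import time
from functools import wraps


def toy_rcache(ttl: float, skip_cache_func=lambda result: False):
    """Approximation of a TTL cache with a skip predicate, for illustration only."""

    def decorator(fn):
        store: dict = {}

        @wraps(fn)
        async def wrapper(*args):
            now = time.monotonic()
            if args in store and now - store[args][0] < ttl:
                return store[args][1]            # fresh cached result
            result = await fn(*args)
            if not skip_cache_func(result):      # only cache results worth keeping
                store[args] = (now, result)
            return result

        return wrapper

    return decorator
```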
@@ -0,0 +1,68 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
# @Project : AI. @by PyCharm
|
4
|
+
# @File : openrouter
|
5
|
+
# @Time : 2025/8/28 08:59
|
6
|
+
# @Author : betterme
|
7
|
+
# @WeChat : meutils
|
8
|
+
# @Software : PyCharm
|
9
|
+
# @Description :
|
10
|
+
|
11
|
+
from meutils.pipe import *
|
12
|
+
|
13
|
+
from meutils.llm.clients import OpenAI
|
14
|
+
|
15
|
+
models_mapping = {
|
16
|
+
"glm-4.5": "zai-org/GLM-4.5",
|
17
|
+
"glm-4.5v": "zai-org/GLM-4.5V",
|
18
|
+
|
19
|
+
"deepseek-v3": "deepseek-ai/DeepSeek-V3",
|
20
|
+
"deepseek-v3-0324": "deepseek-ai/DeepSeek-V3",
|
21
|
+
"deepseek-v3-250324": "deepseek-ai/DeepSeek-V3",
|
22
|
+
"deepseek-chat": "deepseek-ai/DeepSeek-V3",
|
23
|
+
"deepseek-v3.1": "deepseek-ai/DeepSeek-V3.1",
|
24
|
+
"deepseek-v3-1-250821": "deepseek-ai/DeepSeek-V3.1",
|
25
|
+
|
26
|
+
"qwen3-32b": "Qwen/Qwen3-32B",
|
27
|
+
"deepseek-r1": "deepseek-ai/DeepSeek-R1",
|
28
|
+
"deepseek-r1-250528": "deepseek-ai/DeepSeek-R1",
|
29
|
+
"deepseek-reasoner": "deepseek-ai/DeepSeek-R1",
|
30
|
+
"qwen2.5-72b-instruct": "Qwen/Qwen2.5-72B-Instruct-128K",
|
31
|
+
"kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct",
|
32
|
+
"moonshotai/kimi-k2-instruct": "moonshotai/Kimi-K2-Instruct",
|
33
|
+
"qwen2.5-vl-32b-instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
|
34
|
+
"qwen2.5-vl-72b-instruct": "Qwen/Qwen2.5-VL-72B-Instruct",
|
35
|
+
"qwen2.5-vl-7b-instruct": "Qwen/Qwen2.5-VL-7B-Instruct",
|
36
|
+
"minimax-m1-80k": "MiniMaxAI/MiniMax-M1-80k",
|
37
|
+
"qvq-72b-preview": "Qwen/QVQ-72B-Preview",
|
38
|
+
"qwen2.5-7b-instruct": "Qwen/Qwen2.5-7B-Instruct",
|
39
|
+
"deepseek-r1:1.5b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
|
40
|
+
"deepseek-r1-distill-qwen-1.5b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
|
41
|
+
"deepseek-r1:7b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
|
42
|
+
"deepseek-r1-distill-qwen-7b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
|
43
|
+
"deepseek-r1:8b": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
|
44
|
+
"deepseek-r1-distill-llama-8b": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
|
45
|
+
"qwen2.5-32b-instruct": "Qwen/Qwen2.5-32B-Instruct"
|
46
|
+
|
47
|
+
}
|
48
|
+
|
49
|
+
|
50
|
+
def get_models_mapping():
|
51
|
+
client = OpenAI(
|
52
|
+
api_key=os.getenv("OPENROUTER_API_KEY"),
|
53
|
+
base_url=os.getenv("OPENROUTER_BASE_URL"),
|
54
|
+
)
|
55
|
+
|
56
|
+
models = client.models.list().data
|
57
|
+
models = {
|
58
|
+
m.id.lower().split('/', maxsplit=1)[-1].removesuffix(":free"): m.id
|
59
|
+
for m in models
|
60
|
+
if m.id.lower().endswith(':free')
|
61
|
+
}
|
62
|
+
return {**models, **models_mapping}
|
63
|
+
|
64
|
+
|
65
|
+
if __name__ == '__main__':
|
66
|
+
data = get_models_mapping()
|
67
|
+
print(bjson(data))
|
68
|
+
print(','.join(data))
|
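The new module keeps a hand-maintained alias table and, in `get_models_mapping()`, merges in every `:free` model returned by the configured OpenRouter-compatible endpoint, keyed by its lowercased name with the provider prefix and `:free` suffix stripped; the static table is unpacked last, so it wins on conflicts. A small hedged sketch of how such a table might be consumed follows; `resolve_model` is illustrative and not part of the package.

```python
# Illustrative consumer of the alias table from meutils/llm/models/openrouter.py.
models_mapping = {
    "deepseek-chat": "deepseek-ai/DeepSeek-V3",
    "deepseek-reasoner": "deepseek-ai/DeepSeek-R1",
    "glm-4.5": "zai-org/GLM-4.5",
}


def resolve_model(alias: str, mapping: dict = models_mapping) -> str:
    # Fall back to the alias itself when no upstream ID is registered,
    # mirroring the dict-merge behaviour of get_models_mapping().
    return mapping.get(alias.lower(), alias)


assert resolve_model("deepseek-chat") == "deepseek-ai/DeepSeek-V3"
assert resolve_model("unknown-model") == "unknown-model"
```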
meutils/llm/openai_utils/adapters.py
CHANGED
````diff
@@ -14,7 +14,7 @@ from meutils.io.files_utils import to_url, to_url_fal
 from meutils.str_utils.json_utils import json_path
 from meutils.llm.openai_utils import create_chat_completion
 from meutils.schemas.openai_types import CompletionRequest, ChatCompletion
-from meutils.schemas.image_types import ImageRequest
+from meutils.schemas.image_types import ImageRequest, ImagesResponse
 from meutils.llm.openai_utils import chat_completion, chat_completion_chunk, create_chat_completion_chunk
 from meutils.str_utils import parse_url, parse_command_string

@@ -60,8 +60,8 @@ async def chat_for_image(
            urls = await to_url_fal(image_urls, content_type="image/png")  # 国外友好
            image = urls
        else:
-           urls = await to_url(image_urls, content_type="image/png")
-           image = urls
+           urls = await to_url(image_urls, content_type="image/png")  # 数组
+           image = urls

    request = ImageRequest(
        model=request.model,
@@ -82,6 +82,7 @@ async def chat_for_image(
    logger.debug(request)

    if not generate: return
+
    future_task = asyncio.create_task(generate(request))  # 异步执行

    async def gen():
@@ -89,10 +90,20 @@ async def chat_for_image(
            await asyncio.sleep(0.05)
            yield i

-
+       try:
+           response = await future_task
+           # response = await response  # 注意
+
+           if not isinstance(response, dict):
+               response = response.model_dump()
+
+           for image in response['data']:
+               yield f"""\n\n"""

-
-       yield f"
+       except Exception as e:
+           # yield f"```error\n{e}\n```\n"
+           raise e

    chunks = create_chat_completion_chunk(gen(), redirect_model=request.model)
    return chunks
````
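The `gen()` coroutine now awaits the background generation task itself, normalizes the result with `model_dump()` when it is not already a dict, and yields one chunk per entry in `response['data']` before `create_chat_completion_chunk` wraps the stream. Below is a self-contained sketch of that producer pattern; the helper names and the markdown body of the yielded chunk are assumptions (the real yield content is not visible in this diff).

```python
import asyncio


async def fake_generate(prompt: str) -> dict:
    # Stand-in for the real image-generation call; pretends the API takes a while.
    await asyncio.sleep(0.2)
    return {"data": [{"url": "https://example.invalid/1.png"},
                     {"url": "https://example.invalid/2.png"}]}


async def chat_for_image_sketch(prompt: str):
    future_task = asyncio.create_task(fake_generate(prompt))  # run in background

    async def gen():
        for ch in "generating...":
            await asyncio.sleep(0.05)
            yield ch  # keep the stream alive while the task runs

        try:
            response = await future_task
            for image in response["data"]:
                # Assumed markdown form; the package's actual chunk body may differ.
                yield f"\n\n![image]({image['url']})\n\n"
        except Exception:
            raise  # surface failures instead of swallowing them

    return gen()


async def main():
    async for chunk in await chat_for_image_sketch("a cat and a dog"):
        print(chunk, end="")


if __name__ == "__main__":
    asyncio.run(main())
```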
meutils/schemas/image_types.py
CHANGED
```diff
@@ -134,24 +134,31 @@ class ImageRequest(BaseModel):  # openai
        self.size = self.size if 'x' in self.size else '512x512'

    @cached_property
-   def image_urls(self):
+   def image_urls(self) -> List[str]:
        if self.image:
            if isinstance(self.image, str):
                return [self.image]
            else:
                return self.image
+       else:
+           image, _ = self.image_and_prompt
+           if image:
+               return [image]
+
        return []

    @cached_property
    def image_and_prompt(self):  # image prompt 目前是单图
        if self.prompt.startswith('http') and (prompts := self.prompt.split(maxsplit=1)):
            if len(prompts) == 2:
+               self.prompt = prompts[-1]
                return prompts
            else:
                return prompts + [' ']  # 只有 单url

        elif "http" in self.prompt and (images := parse_url(self.prompt)):
-
+           self.prompt = self.prompt.replace(images[0], "")
+           return images[0], self.prompt

        else:
            return None, self.prompt
@@ -605,29 +612,34 @@ if __name__ == '__main__':

    prompt = "https://oss.ffire.cc/files/kling_watermark.png 带个眼镜"
    # prompt = "带个眼镜 https://oss.ffire.cc/files/kling_watermark.png"
-   prompt = "https://oss.ffire.cc/files/kling_watermark.png"
-   prompt = "画条狗"
+   # prompt = "https://oss.ffire.cc/files/kling_watermark.png"
+   # prompt = "画条狗"

    request = ImageRequest(prompt=prompt)
-   print(request.image_and_prompt)
-
-   data = {"image[]": "xxx"}
-   r = ImageRequest(**data)

-
-       "model": "flux-kontext-pro",
-       "prompt": "a cat and a dog",
-       "size": "512x1024",
-       # "aspect_ratio": "16:9"
-   }
+   print(request.prompt)

-
-
-
-       "size": "5121x1024",
-       # "aspect_ratio": "16:9"
-   }
+   print(request.image_urls)
+   print(request.image_and_prompt)
+   print(request.prompt)

-
+   # data = {"image[]": "xxx"}
+   # r = ImageRequest(**data)
+   #
+   # data = {
+   #     "model": "flux-kontext-pro",
+   #     "prompt": "a cat and a dog",
+   #     "size": "512x1024",
+   #     # "aspect_ratio": "16:9"
+   # }
+   #
+   # dd = {
+   #     "model": "flux-kontext-pro",
+   #     "prompt": "a cat and a dog",
+   #     "size": "5121x1024",
+   #     # "aspect_ratio": "16:9"
+   # }
+   #
+   # r = ImageRequest(**data)

    # print(ImagesResponse())
```
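With this change, `image_and_prompt` strips the URL out of `self.prompt` as a side effect, and `image_urls` falls back to it when no explicit `image` is set. The standalone sketch below shows the splitting rule on its own, with `parse_url` replaced by a simple regex stand-in; it mirrors `ImageRequest.image_and_prompt` but is not the package's implementation.

```python
import re

URL_RE = re.compile(r"https?://\S+")


def image_and_prompt(prompt: str):
    if prompt.startswith("http") and (parts := prompt.split(maxsplit=1)):
        if len(parts) == 2:
            return parts[0], parts[1]      # "URL text" -> (URL, remaining text)
        return parts[0], " "               # bare URL -> (URL, " ")
    if "http" in prompt and (urls := URL_RE.findall(prompt)):
        return urls[0], prompt.replace(urls[0], "").strip()  # URL embedded mid-prompt
    return None, prompt                    # no URL at all


assert image_and_prompt("https://oss.ffire.cc/files/kling_watermark.png 带个眼镜") == (
    "https://oss.ffire.cc/files/kling_watermark.png", "带个眼镜")
assert image_and_prompt("画条狗") == (None, "画条狗")
```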
meutils/schemas/oneapi/common.py
CHANGED
```diff
@@ -110,11 +110,17 @@ FAL_MODELS = {
 MODEL_PRICE = {
     **FAL_MODELS,

+    "nano-banana": 0.04 * 3,
+    "gemini-2.5-flash-image": 0.04 * 3,
+    "gemini-2.5-flash-image-preview": 0.06,
+
     "qwen-image": 0.05,
     "qwen-image-edit": 0.05,

     "wan-ai-wan2.1-t2v-14b": 1,
     "wan-ai-wan2.1-t2v-14b-turbo": 1,
+    "wan2-1-14b-i2v-250225": 1,
+    "wan2-1-14b-t2v-250225": 1,

     "async-task": 0.0001,
     "chatfire-claude": 0.02,
@@ -174,9 +180,8 @@ MODEL_PRICE = {
     "doubao-seedream-3-0-t2i-250415": 0.1,
     "doubao-seededit-3-0-i2i-250628": 0.1,

+    "doubao-seedance-1-0-pro": 0.5,  # 480 1080
     "doubao-seedance-1-0-pro-250528": 0.5,  # 480 1080
-    "api-doubao-seedance-1-0-pro-250528-1080p": 2,  # 480 1080
-    "api-doubao-seedance-1-0-pro-250528-480p": 0.5,  # 480 1080

     "doubao-seedance-1-0-lite-t2v-250428": 0.4,
     "doubao-seedance-1-0-lite-i2v-250428": 0.4,
@@ -1065,6 +1070,7 @@ MODEL_RATIO = {
     "gemini-2.5-pro-preview-05-06": 0.625,
     "gemini-2.5-pro-preview-06-05": 0.625,
     "gemini-2.5-pro": 0.625,
+    "gemini-2.5-pro-nothinking": 0.625,

     "gemini-1.5-pro-001": 1.25,
     "gemini-1.5-pro-002": 1.25,
@@ -1409,6 +1415,12 @@ COMPLETION_RATIO = {
     "gemini-2.5-pro-preview-03-25": 8,
     "gemini-2.5-pro-preview-05-06": 8,
     "gemini-2.5-pro-preview-06-05": 8,
+    "gemini-2.5-pro-think": 8,
+
+    "gemini-2.5-pro-thinking": 8,
+    "gemini-2.5-pro-exp-03-25-thinking":8,
+    "gemini-2.5-pro-preview-03-25-thinking": 8,
+    "gemini-2.5-pro-nothinking": 8,

     "gemma2-9b-it": 4,
     "gemma2-27b-it": 4,
@@ -1420,11 +1432,7 @@ COMPLETION_RATIO = {
     "gemini-2.5-flash-preview-05-20-thinking": 23,
     "gemini-2.5-flash-preview-05-20": 8.4,
     "gemini-2.5-flash-preview-05-20-nothinking": 8.4,
-    "gemini-2.5-pro-think": 4,

-    "gemini-2.5-pro-thinking": 4,
-    "gemini-2.5-pro-exp-03-25-thinking": 4,
-    "gemini-2.5-pro-preview-03-25-thinking": 4,

     "hunyuan-a52b-instruct": 5,
     "qwen2.5-coder-32b-instruct": 3,
```
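MODEL_PRICE carries flat per-call prices (the new nano-banana and gemini-2.5-flash-image entries), while MODEL_RATIO and COMPLETION_RATIO scale prompt and completion tokens; the gemini-2.5-pro thinking variants move from a completion ratio of 4 to 8. A hedged sketch of how a billing layer might combine these tables follows; the charge formula and `base_price_per_1k` are illustrative assumptions, not taken from the package.

```python
# Trimmed copies of the tables touched in this diff, for the sketch only.
MODEL_PRICE = {"nano-banana": 0.04 * 3, "gemini-2.5-flash-image-preview": 0.06}
MODEL_RATIO = {"gemini-2.5-pro-nothinking": 0.625}
COMPLETION_RATIO = {"gemini-2.5-pro-nothinking": 8}


def estimate_charge(model: str, prompt_tokens: int = 0, completion_tokens: int = 0,
                    base_price_per_1k: float = 0.002) -> float:
    if model in MODEL_PRICE:                              # image/video style: priced per call
        return MODEL_PRICE[model]
    ratio = MODEL_RATIO.get(model, 1)                     # prompt-side multiplier
    completion_ratio = COMPLETION_RATIO.get(model, 1)     # completion-side multiplier
    return base_price_per_1k * ratio * (prompt_tokens + completion_ratio * completion_tokens) / 1000


print(estimate_charge("nano-banana"))                            # flat per-call price
print(estimate_charge("gemini-2.5-pro-nothinking", 1000, 1000))  # token-based estimate
```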