MeUtils 2025.4.30.17.40.5__py3-none-any.whl → 2025.5.7.18.34.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/METADATA +263 -263
- {MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/RECORD +23 -20
- examples/_openaisdk/openai_chatfire.py +2 -2
- meutils/ai_audio/tts/openai_tts.py +3 -1
- meutils/apis/fal/kling_videos.py +82 -0
- meutils/apis/fal/videos.py +4 -1
- meutils/apis/jimeng/common.py +20 -8
- meutils/apis/jimeng/files.py +18 -10
- meutils/apis/jimeng/images.py +6 -14
- meutils/apis/jimeng/videos.py +53 -197
- meutils/apis/jimeng/videos_videos.py +334 -0
- meutils/apis/oneapi/channel.py +43 -0
- meutils/apis/oneapi/system.py +24 -0
- meutils/apis/volcengine_apis/images.py +3 -0
- meutils/data/VERSION +1 -1
- meutils/llm/completions/chat_spark.py +10 -16
- meutils/llm/openai_polling/chat.py +1 -4
- meutils/schemas/oneapi/common.py +16 -3
- meutils/schemas/video_types.py +22 -3
- {MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/LICENSE +0 -0
- {MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/WHEEL +0 -0
- {MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/entry_points.txt +0 -0
- {MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/top_level.txt +0 -0
meutils/apis/oneapi/channel.py
CHANGED
@@ -158,3 +158,46 @@ if __name__ == '__main__':
 arun(create_or_update_channel(tokens, base_url))
 # arun(create_or_update_channel(tokens))
 # # arun(delete_channel(range(10000, 20000)))
+
+
+"""
+API_KEY=6c255307-7b4d-4be8-984b-5440a3e867eb
+curl --location --request POST 'https://api.ffire.cc/api/channel/' \
+--header 'new-api-user: 1' \
+--header 'Authorization: Bearer 20ff7099a62f441287f47c86431a7f12' \
+--header 'User-Agent: Apifox/1.0.0 (https://apifox.com)' \
+--header 'content-type: application/json' \
+--data-raw '{
+    "type": 8,
+    "key": "${API_KEY}",
+    "openai_organization": "",
+    "test_model": "",
+    "status": 1,
+    "name": "火山-超刷",
+    "weight": 0,
+    "created_time": 1746166915,
+    "test_time": 1746156171,
+    "response_time": 878,
+    "base_url": "https://ark.cn-beijing.volces.com/api/v3/chat/completions",
+    "other": "",
+    "balance": 0,
+    "balance_updated_time": 0,
+    "models": "doubao-1.5-vision-pro-250328,deepseek-v3,deepseek-v3-0324,deepseek-v3-250324,deepseek-v3-8k,deepseek-v3-128k,deepseek-chat,deepseek-chat-8k,deepseek-chat-64k,deepseek-chat-164k,deepseek-chat:function,deepseek-vl2,deepseek-ai/deepseek-vl2,deepseek-r1,deepseek-r1-8k,deepseek-reasoner,deepseek-reasoner-8k,deepseek-r1-250120,deepseek-search,deepseek-r1-search,deepseek-reasoner-search,deepseek-r1-think,deepseek-reasoner-think,deepseek-r1-plus,deepseek-r1:1.5b,deepseek-r1-lite,deepseek-r1-distill-qwen-1.5b,deepseek-r1:7b,deepseek-r1-distill-qwen-7b,deepseek-r1:8b,deepseek-r1-distill-llama-8b,deepseek-r1:14b,deepseek-r1-distill-qwen-14b,deepseek-r1:32b,deepseek-r1-distill-qwen-32b,deepseek-r1:70b,deepseek-r1-distill-llama-70b,deepseek-r1-metasearch,doubao-1-5-pro-32k,doubao-1-5-pro-32k-250115,doubao-1-5-pro-256k,doubao-1-5-pro-256k-250115,doubao-1-5-vision-pro-32k,doubao-1-5-vision-pro-32k-250115,doubao-lite-128k,doubao-lite-32k,doubao-lite-32k-character,doubao-lite-4k,doubao-1.5-lite-32k,doubao-pro-4k,doubao-pro-32k,doubao-pro-32k-character,doubao-pro-128k,doubao-pro-256k,doubao-1.5-pro-32k,doubao-1.5-pro-256k,doubao-1.5-vision-pro-32k,doubao-vision-lite-32k,doubao-vision-pro-32k,doubao-1-5-pro-thinking,doubao-1-5-vision-thinking,doubao-1-5-thinking-pro-250415,doubao-1-5-thinking-pro-vision,doubao-1-5-thinking-pro-vision-250415,doubao-1-5-thinking-pro-m-250415,moonshot-v1-8k,moonshot-v1-32k,moonshot-v1-128k",
+    "group": "default,deepseek,volcengine",
+    "used_quota": 0,
+    "model_mapping": "{\n \"deepseek-r1\": \"deepseek-r1-250120\",\n \"deepseek-reasoner\": \"deepseek-r1-250120\",\n \"deepseek-v3-0324\": \"deepseek-v3-250324\",\n \"deepseek-v3\": \"deepseek-v3-250324\",\n \"deepseek-chat\": \"deepseek-v3-250324\",\n \"doubao-1-5-vision-pro-32k\": \"doubao-1-5-vision-pro-32k-250115\",\n \"doubao-1.5-vision-pro-32k\": \"doubao-1-5-vision-pro-32k-250115\",\n \"doubao-pro-32k\": \"doubao-1-5-pro-32k-250115\",\n \"doubao-pro-256k\": \"doubao-1-5-pro-256k-250115\",\n \"doubao-1.5-lite-32k\": \"doubao-1-5-lite-32k-250115\",\n \"doubao-lite-4k\": \"doubao-1-5-lite-32k-250115\",\n \"doubao-lite-32k\": \"doubao-1-5-lite-32k-250115\",\n \"doubao-lite-128k\": \"doubao-lite-128k-240828\",\n \"doubao-pro-128k\": \"doubao-1-5-pro-256k-250115\",\n \"doubao-1.5-lite\": \"doubao-1-5-lite-32k-250115\",\n \"doubao-vision-lite-32k\": \"doubao-vision-lite-32k-241015\",\n \"doubao-vision-pro-32k\": \"doubao-1-5-vision-pro-32k-250115\",\n \"doubao-1.5-pro-32k\": \"doubao-1-5-pro-32k-250115\",\n \"doubao-1.5-pro-256k\": \"doubao-1-5-pro-256k-250115\",\n \"doubao-1-5-thinking-pro\": \"doubao-1-5-thinking-pro-250415\",\n \"doubao-1-5-thinking-pro-vision\": \"doubao-1-5-thinking-pro-vision-250415\"\n}",
+    "status_code_mapping": "",
+    "priority": 999,
+    "auto_ban": 1,
+    "other_info": "",
+    "settings": "",
+    "tag": "火山",
+    "setting": null,
+    "param_override": null,
+    "groups": [
+        "default",
+        "deepseek",
+        "volcengine"
+    ]
+}'
+"""
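The docstring added above records the raw curl call for registering a volcengine channel against the new-api `/api/channel/` endpoint. For reference, a minimal Python sketch of the same request using httpx; the admin token and API key are placeholders, and the payload is abbreviated from the curl body above:

```python
import os
import httpx

# Abbreviated from the curl body above; "models" is shortened here for brevity.
payload = {
    "type": 8,
    "key": os.getenv("ARK_API_KEY", ""),  # volcengine ark API key (placeholder)
    "name": "火山-超刷",
    "status": 1,
    "priority": 999,
    "auto_ban": 1,
    "base_url": "https://ark.cn-beijing.volces.com/api/v3/chat/completions",
    "group": "default,deepseek,volcengine",
    "groups": ["default", "deepseek", "volcengine"],
    "models": "deepseek-v3,deepseek-r1,doubao-1-5-pro-32k-250115",  # shortened
}

response = httpx.post(
    "https://api.ffire.cc/api/channel/",
    headers={
        "new-api-user": "1",
        "Authorization": "Bearer <admin-token>",  # placeholder admin token
        "content-type": "application/json",
    },
    json=payload,
)
response.raise_for_status()
print(response.json())
```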
meutils/apis/oneapi/system.py
ADDED
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : system
+# @Time : 2025/4/30 18:06
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description :
+
+from meutils.pipe import *
+
+# curl 'https://usa.chatfire.cn/api/option/' \
+# -X 'PUT' \
+# -H 'New-API-User: 1' \
+# -H 'sec-ch-ua-platform: "macOS"' \
+# -H 'Cache-Control: no-store' \
+# -H 'Referer: https://usa.chatfire.cn/setting?tab=operation' \
+# -H 'sec-ch-ua: "Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"' \
+# -H 'sec-ch-ua-mobile: ?0' \
+# -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36' \
+# -H 'Accept: application/json, text/plain, */*' \
+# -H 'Content-Type: application/json' \
+# --data-raw '{"key":"LogConsumeEnabled","value":"false"}'
meutils/apis/volcengine_apis/images.py
CHANGED
@@ -15,12 +15,15 @@ https://www.volcengine.com/docs/6791/1384311
 import os
 
 from meutils.pipe import *
+from meutils.decorators.retry import retrying
+
 from meutils.io.files_utils import to_url
 from meutils.schemas.image_types import ImageRequest, ImagesResponse
 
 from volcengine.visual.VisualService import VisualService
 
 
+@retrying(min=3, max=5)
 async def generate(request: ImageRequest, token: Optional[str] = None):
     """
 
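The new `@retrying(min=3, max=5)` decorator wraps `generate` with retries; its implementation lives in `meutils.decorators.retry` and is not part of this diff. As a rough illustration of the pattern only (the backoff semantics of `min`/`max` below are an assumption, not the library's actual behavior), an async retry wrapper of this shape could look like:

```python
import asyncio
import functools
import random

def retrying(min: float = 3, max: float = 5, attempts: int = 3):
    """Illustration only: retry an async callable, sleeping a random
    delay between `min` and `max` seconds before each new attempt."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            last_exc = None
            for attempt in range(attempts):
                try:
                    return await func(*args, **kwargs)
                except Exception as exc:  # the real decorator may be more selective
                    last_exc = exc
                    if attempt < attempts - 1:
                        await asyncio.sleep(random.uniform(min, max))
            raise last_exc
        return wrapper
    return decorator

@retrying(min=3, max=5)
async def generate():
    ...  # e.g. call the volcengine VisualService and raise on transient errors
```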
meutils/data/VERSION
CHANGED
@@ -1 +1 @@
-2025.
+2025.05.07.18.34.50
meutils/llm/completions/chat_spark.py
CHANGED
@@ -12,6 +12,7 @@
 from meutils.pipe import *
 from meutils.io.openai_files import file_extract, guess_mime_type
 from meutils.str_utils.json_utils import json_path
+from meutils.str_utils.regular_expression import parse_url
 
 from meutils.llm.clients import AsyncOpenAI, zhipuai_client
 from meutils.llm.openai_utils import to_openai_params
@@ -26,26 +27,12 @@ class Completions(object):
 
 async def create(self, request: CompletionRequest):
 logger.debug(request.last_user_content)
+logger.debug(parse_url(request.last_assistant_content))
 
 if request.last_user_content.startswith("http"): # 文件问答-单轮
 file_url, *texts = request.last_user_content.split(maxsplit=1) + ["总结下"]
 text = texts[0]
 
-# # 图片:走4v
-# if guess_mime_type(file_url).startswith("image"):
-# request.model = "glm-4v-flash"
-# request.messages = [
-# {
-# 'role': 'user',
-# 'content': [{"type": "image_url", "image_url": {"url": file_url}}]
-# }
-# ]
-# {
-# "type": "image_url",
-# "image_url": {
-# "url": {
-# "url":
-
 file_content = await file_extract(file_url)
 
 request.messages = [
@@ -59,7 +46,7 @@ class Completions(object):
 
 if guess_mime_type(url).startswith("image"): # 图片问答
 
-request.model = "glm-4v-flash"
+request.model = "glm-4v-flash" if "image" not in request.model else request.model
 for i, message in enumerate(request.messages):
 if message.get("role") == "user":
 user_contents = message.get("content")
@@ -91,6 +78,13 @@ class Completions(object):
 request.messages = request.messages[-i:]
 break # 截断:从最新的文件开始
 
+elif image_urls := parse_url(request.last_assistant_content):
+image_url = image_urls[-1]
+request.messages[-1]["content"] = [
+{"type": "text", "text": request.last_user_content},
+{"type": "image_url", "image_url": {"url": image_url}}
+]
+
 logger.debug(request.model_dump_json(indent=4))
 
 data = to_openai_params(request)
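The new `elif image_urls := parse_url(...)` branch rewrites the latest user turn into OpenAI-style multimodal content when the previous assistant reply contains an image URL. The resulting message shape is the standard chat-completions format; a plain-Python illustration (the URL and text below are placeholders):

```python
# Placeholder values; in chat_spark.py the URL comes from
# parse_url(request.last_assistant_content) and the text from
# request.last_user_content.
last_user_text = "What changed between these two frames?"
image_url = "https://example.com/generated.png"

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": last_user_text},
            {"type": "image_url", "image_url": {"url": image_url}},
        ],
    }
]
print(messages)
```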
meutils/llm/openai_polling/chat.py
CHANGED
@@ -85,12 +85,9 @@ class Completions(object):
 if not request.reasoning_effort: # 默认关闭思考
 data['reasoning_effort'] = "none"
 
-if "gemini-2.5-pro" in request.model: ####### 关闭失效
-data['reasoning_effort'] = "low"
-
 if "thinking" in request.model:
 data['model'] = data['model'].removesuffix("-thinking") # 开启思考
-data['reasoning_effort'] =
+data['reasoning_effort'] = 'low'
 
 logger.debug(data)
 return await self.client.chat.completions.create(**data)
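The polling client now derives reasoning behavior purely from the model name: a `-thinking` suffix is stripped and mapped to low reasoning effort, otherwise thinking stays off unless the caller set `reasoning_effort`. A standalone sketch of that mapping (the model name below is a placeholder, purely illustrative):

```python
from typing import Optional

def apply_reasoning_effort(data: dict, reasoning_effort: Optional[str] = None) -> dict:
    """Mirror of the updated polling logic: thinking is off by default,
    and a '-thinking' suffix on the model name enables low effort."""
    if not reasoning_effort:  # default: no thinking
        data["reasoning_effort"] = "none"
    if "thinking" in data["model"]:
        data["model"] = data["model"].removesuffix("-thinking")
        data["reasoning_effort"] = "low"
    return data

print(apply_reasoning_effort({"model": "some-model-thinking"}))
# -> {'model': 'some-model', 'reasoning_effort': 'low'}
```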
meutils/schemas/oneapi/common.py
CHANGED
@@ -569,6 +569,7 @@ MODEL_RATIO = {
 "abab5.5s-chat": 2.5,
 
 # deepseek
+"deepseek-prover-v2-671b": 2,
 "deepseek-v3": 1,
 "deepseek-v3-0324": 1,
 "deepseek-v3-250324": 1,
@@ -646,6 +647,8 @@ MODEL_RATIO = {
 "doubao-1.5-pro-256k": 5 / 2,
 
 "doubao-1.5-vision-pro-32k": 1.5,
+"doubao-1.5-vision-pro-250328": 1.5,
+
 "doubao-vision-lite-32k": 0.75,
 "doubao-vision-pro-32k": 1.5,
 
@@ -765,6 +768,7 @@ MODEL_RATIO = {
 "gemini-2.0-pro-exp-02-05": 0.625,
 "gemini-2.5-pro-exp-03-25": 0.625,
 "gemini-2.5-pro-preview-03-25": 0.625,
+"gemini-2.5-pro-preview-05-06": 0.625,
 
 "gemini-1.5-pro-001": 1.25,
 "gemini-1.5-pro-002": 1.25,
@@ -887,8 +891,9 @@ MODEL_RATIO = {
 "Qwen/Qwen2-VL-72B-Instruct": 2,
 
 # 临时
-"
-"microsoft/phi-4": 0.035
+"microsoft/phi-4": 0.035,
+"microsoft/phi-4-reasoning": 0.035,
+"microsoft/phi-4-reasoning-plus": 0.035 * 2,
 "mistral-small-3.1-24b-instruct": 0.1,
 "mistral-small-24b-instruct-2501": 0.1,
 
@@ -1032,6 +1037,7 @@ COMPLETION_RATIO = {
 "gemini-2.0-pro-exp-02-05": 5,
 "gemini-2.5-pro-exp-03-25": 8,
 "gemini-2.5-pro-preview-03-25": 8,
+"gemini-2.5-pro-preview-05-06": 8,
 
 "gemma2-9b-it": 4,
 "gemma2-27b-it": 4,
@@ -1113,6 +1119,8 @@ COMPLETION_RATIO = {
 "doubao-1.5-pro-256k": 3,
 
 "doubao-1.5-vision-pro-32k": 3,
+"doubao-1.5-vision-pro-250328": 3,
+
 "doubao-1-5-vision-pro-32k": 3,
 "doubao-1-5-vision-pro-32k-250115": 3,
 
@@ -1132,6 +1140,7 @@ COMPLETION_RATIO = {
 "doubao-1-5-thinking-pro-vision-250415": 4,
 "doubao-1-5-thinking-pro-m-250415": 4,
 
+"deepseek-prover-v2-671b": 4,
 "deepseek-r1:1.5b": 4,
 "deepseek-r1-distill-qwen-1.5b": 4,
 "deepseek-r1:7b": 4,
@@ -1360,7 +1369,9 @@ REDIRECT_MODEL = {
 "tune-mythomax-l2-13b": "rohan/tune-mythomax-l2-13b",
 "tune-wizardlm-2-8x22b": "rohan/tune-wizardlm-2-8x22b",
 
-"microsoft/phi-4":
+"microsoft/phi-4": 5,
+"microsoft/phi-4-reasoning": 5,
+"microsoft/phi-4-reasoning-plus": 5,
 
 }
 
@@ -1398,3 +1409,5 @@ if __name__ == '__main__':
 print(bjson({k: v * 6 for k, v in MODEL_RATIO.items() if k.startswith('claude')}))
 print([k for k in MODEL_RATIO if k.startswith('gpt-4.1')] | xjoin(","))
 print([k for k in MODEL_RATIO if k.startswith('qwen3')] | xjoin(","))
+
+print([k for k in MODEL_RATIO if k.startswith(('deepseek', 'doubao', 'moon'))] | xjoin(","))
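MODEL_RATIO and COMPLETION_RATIO are multipliers rather than absolute prices: under the usual one-api convention, the model ratio scales a base unit price and the completion ratio is the output-to-input price multiplier. A back-of-the-envelope sketch using the `deepseek-prover-v2-671b` entries added above (the base price and the billing formula here are assumptions for the arithmetic, not values from this package):

```python
# Illustrative cost estimate; BASE_PRICE_PER_1K is an assumed unit price.
BASE_PRICE_PER_1K = 0.002

MODEL_RATIO = {"deepseek-prover-v2-671b": 2}
COMPLETION_RATIO = {"deepseek-prover-v2-671b": 4}

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    ratio = MODEL_RATIO[model]
    completion_ratio = COMPLETION_RATIO.get(model, 1)
    return BASE_PRICE_PER_1K * ratio * (
        prompt_tokens + completion_tokens * completion_ratio
    ) / 1000

print(estimate_cost("deepseek-prover-v2-671b", 1000, 500))  # 0.012
```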
meutils/schemas/video_types.py
CHANGED
@@ -12,15 +12,17 @@ from meutils.pipe import *
 
 
 class VideoRequest(BaseModel):
-model: Literal["cogvideox-flash", "cogvideox"] = "cogvideox-flash"
+model: Union[str, Literal["cogvideox-flash", "cogvideox"]] = "cogvideox-flash"
 
 prompt: str = "比得兔开小汽车,游走在马路上,脸上的表情充满开心喜悦。"
+negative_prompt: Optional[str] = None
 
 """
 提供基于其生成内容的图像。如果传入此参数,系统将以该图像为基础进行操作。支持通过URL或Base64编码传入图片。
 图片要求如下:图片支持.png、jpeg、.jpg 格式、图片大小:不超过5M。image_url和prompt二选一或者同时传入。
 """
 image_url: Optional[str] = None
+tail_image_url: Optional[str] = None
 
 """
 输出模式,默认为 "quality"。 "quality":质量优先,生成质量高。 "speed":速度优先,生成时间更快,质量相对降低。
@@ -31,10 +33,14 @@ class VideoRequest(BaseModel):
 """是否生成 AI 音效。默认值: False(不生成音效)。"""
 with_audio: bool = True
 
+cfg_scale: Optional[float] = None
+
 """
 默认值: 若不指定,默认生成视频的短边为 1080,长边根据原图片比例缩放。最高支持 4K 分辨率。
 分辨率选项:720x480、1024x1024、1280x960、960x1280、1920x1080、1080x1920、2048x1080、3840x2160
 """
+aspect_ratio: Union[str, Literal["1:1", "21:9", "16:9", "9:16", "4:3", "3:4"]] = "16:9"
+
 size: Literal[
 '720x480',
 '1024x1024',
@@ -62,7 +68,20 @@ class FalVideoRequest(BaseModel):
 sync_mode: Union[str, Literal["cut_off", "loop", "bounce"]] = "cut_off"
 
 
-class
+class FalKlingVideoRequest(BaseModel):
+model: Union[
+str, Literal["fal-ai/kling-video/v1/standard/text-to-video",]] = 'fal-ai/kling-video/v1/standard/text-to-video'
+
+prompt: Optional[str] = None
+duration: Optional[float] = 5.0
+video_url: Optional[str] = None
+audio_url: Optional[str] = None
+image_url: Optional[str] = None
+
+sync_mode: Union[str, Literal["cut_off", "loop", "bounce"]] = "cut_off"
+
+
+class LipsyncVideoRequest(BaseModel):
 model: Union[str, Literal[
 "latentsync", "sync-lipsync",
 "lip_sync_avatar_std", "lip_sync_avatar_lively"
@@ -77,4 +96,4 @@ class LipsyncVideoRquest(BaseModel):
 
 
 if __name__ == '__main__':
-print(
+print(LipsyncVideoRequest())
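A quick usage sketch of the new `FalKlingVideoRequest` model (assumes this MeUtils version is installed; field values are placeholders):

```python
from meutils.schemas.video_types import FalKlingVideoRequest

# All fields have defaults, so only the prompt is required in practice.
request = FalKlingVideoRequest(
    prompt="A corgi surfing a small wave at sunset",
    duration=5.0,
    image_url=None,  # optional start frame
)
print(request.model)  # fal-ai/kling-video/v1/standard/text-to-video
print(request.model_dump_json(indent=2))
```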
{MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/LICENSE
RENAMED
File without changes
{MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/WHEEL
RENAMED
File without changes
{MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/entry_points.txt
RENAMED
File without changes
{MeUtils-2025.4.30.17.40.5.dist-info → MeUtils-2025.5.7.18.34.50.dist-info}/top_level.txt
RENAMED
File without changes