MeUtils 2025.3.19.19.13.35__py3-none-any.whl → 2025.3.20.17.3.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/METADATA +265 -265
- {MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/RECORD +26 -23
- examples/_openaisdk/openai_audio.py +7 -2
- meutils/apis/audio/fish.py +1 -1
- meutils/apis/baidu/bdaitpzs.py +1 -1
- meutils/apis/hunyuan/image_tools.py +1 -1
- meutils/apis/images/eidt.py +22 -12
- meutils/apis/proxy/ips.py +4 -23
- meutils/apis/textin.py +1 -1
- meutils/config_utils/lark_utils/common.py +5 -1
- meutils/data/VERSION +1 -1
- meutils/io/files_utils.py +29 -19
- meutils/llm/check_api.py +3 -1
- meutils/llm/check_utils.py +4 -1
- meutils/llm/completions/chat_gemini.py +51 -1
- meutils/llm/completions/chat_plus.py +22 -14
- meutils/llm/completions/chat_videos.py +95 -0
- meutils/llm/completions/deep2x.py +100 -0
- meutils/llm/completions/openai_gemini.py +13 -0
- meutils/oss/__init__.py +0 -6
- meutils/schemas/openai_types.py +33 -7
- meutils/str_utils/regular_expression.py +4 -0
- {MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/LICENSE +0 -0
- {MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/WHEEL +0 -0
- {MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/entry_points.txt +0 -0
- {MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/top_level.txt +0 -0
meutils/llm/completions/chat_videos.py ADDED
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project      : AI.  @by PyCharm
+# @File         : chat_videos
+# @Time         : 2025/3/20 10:19
+# @Author       : betterme
+# @WeChat       : meutils
+# @Software     : PyCharm
+# @Description  :
+
+
+from meutils.pipe import *
+from meutils.llm.clients import AsyncOpenAI
+from meutils.apis.chatglm import glm_video_api  # VideoRequest, create_task, get_task
+from meutils.str_utils.regular_expression import parse_url
+
+from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, CompletionUsage
+
+
+class Completions(object):
+
+    def __init__(self,
+                 base_url: Optional[str] = None,
+                 api_key: Optional[str] = None
+                 ):
+        self.client = AsyncOpenAI(
+            base_url=base_url,
+            api_key=api_key,
+        )
+
+    async def create(self, request: CompletionRequest):
+
+        image_url = None
+        prompt = request.last_user_content
+        if urls := parse_url(prompt):
+            image_url = urls[0]
+            prompt = prompt.replace(image_url, "")
+
+        # Create the video task
+        video_request = glm_video_api.VideoRequest(image_url=image_url, prompt=prompt)
+        response = await glm_video_api.create_task(video_request)
+        taskid = response.id
+        system_fingerprint = response.system_fingerprint
+
+        # Poll the task, streaming progress as Markdown
+        for i in f"""> VideoTask(id={taskid.split('-')[-1]}, image_url={image_url}, prompt={prompt})\n""":
+            await asyncio.sleep(0.03)
+            yield i
+
+        yield f"[🤫 任务进度]("  # "task progress" opener; closed with 🎉 once the task succeeds
+        for i in range(60):
+            await asyncio.sleep(3)
+            response = await glm_video_api.get_task(taskid, system_fingerprint)
+
+            logger.debug(response)
+            if response.task_status == "SUCCESS" or response.video_result:
+                yield ")🎉🎉🎉\n\n"
+                for video in response.video_result or []:
+                    yield f"[^1]: [封面]({video.cover_image_url})\n\n"
+                    yield f"[^2]: [视频]({video.url})\n\n"
+
+                    yield f"[视频]({video.url})[^1][^2]\n\n"
+                    yield f"[^1][^2]\n\n"
+
+                break
+            else:
+                yield "🔥"
+
+
+if __name__ == '__main__':
+    url = "https://oss.ffire.cc/files/lipsync.mp3"
+    url = "https://lmdbk.com/5.mp4"
+    content = [
+        {"type": "text", "text": "总结下"},
+        # {"type": "image_url", "image_url": {"url": url}},
+
+        {"type": "video_url", "video_url": {"url": url}}
+
+    ]
+    request = CompletionRequest(
+        # model="qwen-turbo-2024-11-01",
+        model="gemini-all",
+        # model="qwen-plus-latest",
+
+        messages=[
+            {
+                'role': 'user',
+
+                'content': content
+            },
+
+        ],
+        stream=False,
+    )
+    arun(Completions().create(request))
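For orientation: Completions.create above is an async generator that kicks off a video task via glm_video_api and then streams Markdown progress markers followed by the cover/video links. A minimal consumption sketch follows; the prompt, URL, and model name are made up for illustration:

import asyncio

from meutils.schemas.openai_types import CompletionRequest
from meutils.llm.completions.chat_videos import Completions


async def main():
    # A URL inside the prompt is split out by parse_url and becomes
    # VideoRequest.image_url; the remaining text stays as the prompt.
    request = CompletionRequest(
        model="cogvideox",  # hypothetical model name; create() does not dispatch on it
        messages=[{"role": "user", "content": "Animate this https://example.com/cover.png"}],
    )
    async for token in Completions().create(request):
        print(token, end="", flush=True)  # "> VideoTask(...)", 🔥 per poll, then the links


asyncio.run(main())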
meutils/llm/completions/deep2x.py ADDED
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project      : AI.  @by PyCharm
+# @File         : deepx
+# @Time         : 2025/3/20 08:53
+# @Author       : betterme
+# @WeChat       : meutils
+# @Software     : PyCharm
+# @Description  :
+"""
+deep + claude
+
+"""
+
+from openai import AsyncOpenAI
+
+from meutils.pipe import *
+from meutils.decorators.retry import retrying
+from meutils.io.files_utils import to_bytes
+from meutils.io.openai_files import file_extract, guess_mime_type
+from meutils.str_utils.json_utils import json_path
+from meutils.apis.search import metaso
+# from meutils.apis.chatglm import glm_video_api
+
+from meutils.llm.clients import chatfire_client, zhipuai_client, AsyncOpenAI
+from meutils.llm.openai_utils import to_openai_params
+
+from meutils.schemas.openai_types import ChatCompletionRequest
+from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, ImageRequest
+
+
+class Completions(object):
+
+    def __init__(self, api_key: Optional[str] = None):
+        self.api_key = api_key
+        self.client = AsyncOpenAI(api_key=api_key)
+
+    async def create(self, request: CompletionRequest):
+        """
+        :param request:
+        :return:
+        """
+        data = to_openai_params(request)
+        data['model'] = 'deepseek-reasoner'
+        data['max_tokens'] = 1  # Volcengine (火山) supports max_tokens=1 to emit just the reasoning chain
+        if request.stream:
+            reasoning_content = ""
+            completions = await chatfire_client.chat.completions.create(**data)
+            async for chunk in completions:
+                yield chunk
+                delta = chunk.choices[0].delta
+                if hasattr(delta, "reasoning_content"):
+                    reasoning_content += delta.reasoning_content
+
+            request.messages = [
+                {
+                    'role': 'user',
+                    'content': f"""<think>\n\n{reasoning_content}\n\n</think>\n\n{request.last_user_content}"""
+                }
+            ]
+            data = to_openai_params(request)
+            async for chunk in await self.client.chat.completions.create(**data):
+                yield chunk
+        else:
+            reasoning_content = ""
+            completions = await chatfire_client.chat.completions.create(**data)
+            message = completions.choices[0].message
+            if hasattr(message, "reasoning_content"):
+                reasoning_content += message.reasoning_content
+
+            request.messages = [
+                {
+                    'role': 'user',
+                    'content': f"""<think>\n\n{reasoning_content}\n\n</think>\n\n{request.last_user_content}"""
+                }
+            ]
+            data = to_openai_params(request)
+            _completions = await self.client.chat.completions.create(**data)
+            completions.choices[0].message.content = _completions.choices[0].message.content
+            yield completions
+
+    async def screate(self, request: CompletionRequest):
+        pass


+if __name__ == '__main__':
+    c = Completions()
+
+    request = CompletionRequest(
+        # model="qwen-turbo-2024-11-01",
+        # model="claude-3-5-sonnet-20241022",
+        model="deepseek-chat",
+        stream=True,
+
+        messages=[{
+            'role': 'user',
+            'content': "1+1"
+        }])
+
+    arun(c.create(request))
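The pattern above transplants a reasoner's chain of thought into a second model: call deepseek-reasoner with max_tokens=1 (the provider still emits reasoning_content), then replay that text inside <think> tags ahead of the original question. A standalone sketch with the plain OpenAI SDK; endpoints, keys, and the second model name are placeholders, not the package's actual configuration:

import asyncio
from openai import AsyncOpenAI

# Placeholder endpoints/keys; both clients could point at the same gateway.
reasoner = AsyncOpenAI(base_url="https://example.com/v1", api_key="sk-...")
answerer = AsyncOpenAI(base_url="https://example.com/v1", api_key="sk-...")


async def deep_then_answer(prompt: str) -> str:
    # Step 1: harvest the chain of thought. Some providers expose
    # message.reasoning_content even when max_tokens=1 caps the final answer.
    first = await reasoner.chat.completions.create(
        model="deepseek-reasoner",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=1,
    )
    thinking = getattr(first.choices[0].message, "reasoning_content", "") or ""

    # Step 2: replay the reasoning into the target model inside <think> tags.
    second = await answerer.chat.completions.create(
        model="claude-3-5-sonnet-20241022",
        messages=[{"role": "user", "content": f"<think>\n\n{thinking}\n\n</think>\n\n{prompt}"}],
    )
    return second.choices[0].message.content


# asyncio.run(deep_then_answer("1+1"))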
meutils/llm/completions/openai_gemini.py ADDED
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project      : AI.  @by PyCharm
+# @File         : openai_gemini
+# @Time         : 2025/3/20 12:24
+# @Author       : betterme
+# @WeChat       : meutils
+# @Software     : PyCharm
+# @Description  :
+
+from meutils.pipe import *
+
+"https://api.aiguoguo199.com/v1/images/generations"
meutils/oss/__init__.py CHANGED
@@ -8,13 +8,7 @@
 # @Software     : PyCharm
 # @Description  :
 
-from meutils.pipe import *
 
 
-def upload(file: bytes, purpos="zhipu"):
-    from meutils.oss.minio_oss import Minio
-    from meutils.apis.chatglm.glm_video import upload_task
-
-    file_task = upload_task(file)
 
 
meutils/schemas/openai_types.py CHANGED
@@ -121,9 +121,6 @@ class CompletionRequest(BaseModel):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-    class Config:
-        extra = "allow"
-
     @cached_property
     def last_message(self):
         return self.messages and self.messages[-1]
@@ -132,12 +129,23 @@ class CompletionRequest(BaseModel):
     def last_user_content(self) -> str:
         for i, message in enumerate(self.messages[::-1], 1):
             if message.get("role") == "user":
-
-                if isinstance(message.get("content"), list):
-                    for content in message.get("content"):
+                contents = message.get("content")
+                if isinstance(contents, list):
+                    for content in contents:
                         return content.get('text', "")
                 else:
-                    return str(message.get("content"))
+                    return str(contents)
+
+    @cached_property
+    def last_assistant_content(self) -> str:
+        for i, message in enumerate(self.messages[::-1], 1):
+            if message.get("role") == "assistant":
+                contents = message.get("content")
+                if isinstance(contents, list):
+                    for content in contents:
+                        return content.get('text', "")
+                else:
+                    return str(contents)
 
     @cached_property
     def last_urls(self):  # file_url; multi-turn conversations need sum(request.last_urls.values(), [])
@@ -180,6 +188,24 @@ class CompletionRequest(BaseModel):
     #
     # return message
 
+    class Config:
+        extra = "allow"
+
+        json_schema_extra = {
+            "examples": [
+                {
+                    "model": "deepseek-chat",
+                    "messages": [
+                        {
+                            "role": "user",
+                            "content": "hi"
+                        }
+                    ],
+                    "stream": True
+                },
+            ]
+        }
+
 
 class ChatCompletionRequest(BaseModel):
     """
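With the Config block moved to the end of the class and the new cached property, CompletionRequest now answers both "last user text" and "last assistant text", whether content is a plain string or a list of typed parts. A small sketch of the expected behavior; illustrative only, not a test from the package:

from meutils.schemas.openai_types import CompletionRequest

req = CompletionRequest(
    model="deepseek-chat",
    messages=[
        {"role": "user", "content": [
            {"type": "text", "text": "describe this"},
            {"type": "image_url", "image_url": {"url": "https://example.com/a.png"}},
        ]},
        {"role": "assistant", "content": "It is a cat."},
    ],
)

print(req.last_user_content)       # "describe this": text of the first part when content is a list
print(req.last_assistant_content)  # "It is a cat.": str(content) when content is a plain string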
meutils/str_utils/regular_expression.py CHANGED
@@ -20,6 +20,8 @@ HTML_PARSER = re.compile(r'```html(.*?)```', re.DOTALL)
 
 # re.sub(r'=(.+)', r'=123','s=xxxxx')
 
+
+
 @lru_cache()
 def remove_date_suffix(filename):
     """
@@ -152,6 +154,8 @@ if __name__ == '__main__':
 
     print(parse_url(text))
 
+    print(parse_url("[](https://oss.ffire.cc/cdn/2025-03-20/YbHhMbrXV82XGn4msunAJw)"))
+
     # print('https://mj101-1317487292.cos.ap-shanghai.myqcloud.com/ai/test.pdf\\n\\n'.strip(r"\n"))
 
     # print(parse_url("http://154.3.0.117:39666/docs#/default/get_content_preview_spider_playwright_get"))
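The new self-test line exercises URL extraction from an empty-label Markdown link, the form chat_videos.py relies on. Assuming parse_url returns the list of URLs found in a string (as its call sites suggest), usage looks like:

from meutils.str_utils.regular_expression import parse_url

urls = parse_url("Animate this [](https://oss.ffire.cc/cdn/2025-03-20/YbHhMbrXV82XGn4msunAJw)")
print(urls)  # expected: ["https://oss.ffire.cc/cdn/2025-03-20/YbHhMbrXV82XGn4msunAJw"]
             # whether the trailing ")" is excluded is exactly what the new test checks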
{MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/LICENSE RENAMED
File without changes

{MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/WHEEL RENAMED
File without changes

{MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/entry_points.txt RENAMED
File without changes

{MeUtils-2025.3.19.19.13.35.dist-info → MeUtils-2025.3.20.17.3.20.dist-info}/top_level.txt RENAMED
File without changes