MeUtils 2025.8.16.19.17.25-py3-none-any.whl → 2025.8.20.17.32.39-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/_openaisdk/openai_ffire.py +17 -7
- examples/_openaisdk/openai_images.py +8 -2
- examples/_openaisdk/openai_siliconflow.py +33 -4
- examples/_openaisdk/openai_zhipu.py +3 -3
- meutils/apis/chatglm/__init__.py +0 -1
- meutils/apis/chatglm/glm_video_api.py +11 -5
- meutils/apis/chatglm/zai.py +154 -0
- meutils/apis/images/generations.py +6 -0
- meutils/apis/models.py +5 -0
- meutils/apis/oneapi/channel.py +4 -4
- meutils/apis/proxy/kdlapi.py +2 -2
- meutils/apis/qwen/__init__.py +11 -0
- meutils/apis/qwen/chat.py +233 -0
- meutils/apis/siliconflow/images.py +31 -15
- meutils/data/VERSION +1 -1
- meutils/llm/check_utils.py +3 -3
- meutils/llm/openai_utils/adapters.py +16 -4
- meutils/llm/openai_utils/common.py +4 -2
- meutils/math_utils/common.py +3 -0
- meutils/oss/ali_oss.py +4 -5
- meutils/request_utils/crawler.py +7 -1
- meutils/schemas/image_types.py +5 -2
- meutils/schemas/oneapi/common.py +2 -1
- {meutils-2025.8.16.19.17.25.dist-info → meutils-2025.8.20.17.32.39.dist-info}/METADATA +261 -261
- {meutils-2025.8.16.19.17.25.dist-info → meutils-2025.8.20.17.32.39.dist-info}/RECORD +29 -26
- {meutils-2025.8.16.19.17.25.dist-info → meutils-2025.8.20.17.32.39.dist-info}/WHEEL +0 -0
- {meutils-2025.8.16.19.17.25.dist-info → meutils-2025.8.20.17.32.39.dist-info}/entry_points.txt +0 -0
- {meutils-2025.8.16.19.17.25.dist-info → meutils-2025.8.20.17.32.39.dist-info}/licenses/LICENSE +0 -0
- {meutils-2025.8.16.19.17.25.dist-info → meutils-2025.8.20.17.32.39.dist-info}/top_level.txt +0 -0
examples/_openaisdk/openai_ffire.py CHANGED

@@ -14,20 +14,25 @@ from openai import OpenAI
 from openai import OpenAI, APIStatusError
 
 client = OpenAI(
-    base_url=os.getenv("FFIRE_BASE_URL"),
-    api_key=os.getenv("FFIRE_API_KEY")
-
+    # base_url=os.getenv("FFIRE_BASE_URL"),
+    # api_key=os.getenv("FFIRE_API_KEY") #+"-29463"
+
+    base_url="http://127.0.0.1:8000/v1",
 
+    api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgxNTc1Njh9.eihH3NVrzJCg9bdWb9mim9rGKTLKn1a66kW2Cqc0uPM"
+)
+#
 for i in range(1):
     try:
         completion = client.chat.completions.create(
             # model="kimi-k2-0711-preview",
             # model="deepseek-reasoner",
             # model="qwen3-235b-a22b-thinking-2507",
-            model="qwen3-235b-a22b-instruct-2507",
+            # model="qwen3-235b-a22b-instruct-2507",
+            model="qwen-image",
 
             messages=[
-                {"role": "user", "content": '
+                {"role": "user", "content": 'a cat'}
             ],
             # top_p=0.7,
             top_p=None,
@@ -40,5 +45,10 @@ for i in range(1):
     except Exception as e:
         print(e)
 
-
-
+# model = "doubao-embedding-text-240715"
+#
+# r = client.embeddings.create(
+#     input='hi',
+#     model=model
+# )
+# print(r)
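Taken together, these hunks repoint the ffire example at a local OpenAI-compatible gateway and drive the "qwen-image" model through the chat-completions endpoint. A minimal sketch of the same call pattern, with placeholder base_url and api_key (the example itself hardcodes a local gateway and a JWT):

```python
# Minimal sketch, assuming an OpenAI-compatible gateway that exposes
# "qwen-image" via chat completions; base_url and api_key are placeholders.
from openai import OpenAI

client = OpenAI(
    base_url="http://127.0.0.1:8000/v1",  # local gateway, as in the example
    api_key="sk-placeholder",
)

completion = client.chat.completions.create(
    model="qwen-image",
    messages=[{"role": "user", "content": "a cat"}],
)
# Gateways of this style typically return the generated image URL as text.
print(completion.choices[0].message.content)
```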
examples/_openaisdk/openai_images.py CHANGED

@@ -152,7 +152,9 @@ with timer('image'):
         # base_url="https://oneapi.chatfire.cn/v1",
 
         base_url=os.getenv("MODELSCOPE_BASE_URL"),
-        api_key=os.getenv("MODELSCOPE_API_KEY")
+        api_key=os.getenv("MODELSCOPE_API_KEY"),
+
+        # default_query=
 
     )
 
@@ -209,7 +211,7 @@ with timer('image'):
     response = client.images.generate(
         model=model,
         prompt=prompt,
-
+        response_format="url",
         # extra_body={
         #     "Style": "True",
         #     "controls": {}, ######### this one looks more general-purpose
@@ -222,6 +224,10 @@ with timer('image'):
 
         # size="1700x1275",
 
+        extra_headers={"X-ModelScope-Async-Mode": "false"},
+
+
+
         extra_body={
             "extra_fields": {
 
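Both openai_images.py changes push the ModelScope image call toward a synchronous URL response: `response_format="url"` plus the `X-ModelScope-Async-Mode` header. A condensed sketch of the resulting call; the model id is illustrative, and the header's exact semantics are taken from the diff rather than verified:

```python
# Sketch of a synchronous ModelScope image request through the OpenAI SDK.
# MODELSCOPE_BASE_URL / MODELSCOPE_API_KEY are the env vars the example uses.
import os
from openai import OpenAI

client = OpenAI(
    base_url=os.getenv("MODELSCOPE_BASE_URL"),
    api_key=os.getenv("MODELSCOPE_API_KEY"),
)

response = client.images.generate(
    model="Qwen/Qwen-Image",  # hypothetical model id, for illustration
    prompt="a cat in a spacesuit",
    response_format="url",
    extra_headers={"X-ModelScope-Async-Mode": "false"},  # sync mode, per the diff
)
print(response.data[0].url)
```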
examples/_openaisdk/openai_siliconflow.py CHANGED

@@ -15,8 +15,8 @@ from meutils.llm.clients import OpenAI
 
 
 client = OpenAI(
-    api_key=os.getenv("SILICONFLOW_API_KEY"),
-
+    # api_key=os.getenv("SILICONFLOW_API_KEY"),
+    api_key="sk-ugfakteneejitibfzpwttxplymratxacudosclwlvzopexwq",
     base_url="https://api.siliconflow.cn/v1",
     # http_client=httpx.Client(
     #     proxy="http://110.42.51.201:38443",
@@ -57,7 +57,7 @@ response = client.chat.completions.create(
     messages=messages,
     stream=True,
     max_tokens=1,
-    extra_body={"enable_thinking": False}
+    # extra_body={"enable_thinking": False}
 )
 print(response)
 for chunk in response:
@@ -70,10 +70,18 @@ def request_many():
         # model='alibaba/Qwen1.5-110B-Chat',
         model=model,
         messages=[
+            {'role': 'user', 'content': "1+1"},
+            {'role': 'assistant', 'content': """
+<think>
+reasoning_content
+</think>
+content
+""" },
+
             {'role': 'user', 'content': "抛砖引玉是什么意思呀" * 1}
         ],
         # messages=messages,
-        stream=
+        stream=True,
         max_tokens=1,
         extra_body={"enable_thinking": False}
 
@@ -81,3 +89,24 @@ def request_many():
     print(response)
     # for chunk in response:
     #     print(chunk)
+
+
+""""
+reasoning_content
+content, multi-turn
+
+
+
+content
+<think>
+1
+2
+3
+4
+reasoning_content
+</think>
+
+content
+reasoning_content:
+    type: 'think'
+"""
examples/_openaisdk/openai_zhipu.py CHANGED

@@ -13,10 +13,10 @@ from meutils.pipe import *
 from openai import OpenAI
 from openai import OpenAI, APIStatusError
 
-
+# e21bd630f681c4d90b390cd609720483.WSFVgA3KkwNCX0mN
 client = OpenAI(
     # base_url="https://free.chatfire.cn/v1",
-    api_key="
+    api_key="9df724995f384c2e91d673864d1d32eb.aeLMBoocPyRfGBx8",
     base_url="https://open.bigmodel.cn/api/paas/v4"
 
     # api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI3YmFmYWQzYTRmZDU0OTk3YjNmYmNmYjExMjY5NThmZiIsImV4cCI6MTczODAyNDg4MiwibmJmIjoxNzIyNDcyODgyLCJpYXQiOjE3MjI0NzI4ODIsImp0aSI6IjY5Y2ZiNzgzNjRjODQxYjA5Mjg1OTgxYmY4ODMzZDllIiwidWlkIjoiNjVmMDc1Y2E4NWM3NDFiOGU2ZmRjYjEyIiwidHlwZSI6InJlZnJlc2gifQ.u9pIfuQZ7Y00DB6x3rbWYomwQGEyYDSE-814k67SH74",
@@ -29,7 +29,7 @@ A Chinese beauty plays Catwoman. She is seductive. She wears a fitted black leat
 
 try:
     completion = client.chat.completions.create(
-        model="glm-4.5",
+        model="glm-4.5-air",
         # model="xxxxxxxxxxxxx",
         messages=[
            {"role": "system", "content": '你是个内容审核助手'},
meutils/apis/chatglm/glm_video_api.py CHANGED

@@ -7,11 +7,13 @@
 # @WeChat : meutils
 # @Software : PyCharm
 # @Description :
+import httpx
 
 from meutils.pipe import *
 from meutils.decorators.retry import retrying
 from meutils.notice.feishu import send_message as _send_message
 from meutils.config_utils.lark_utils import get_next_token_for_polling
+from meutils.apis.proxy.kdlapi import get_one_proxy
 
 from meutils.schemas.video_types import VideoRequest
 from meutils.schemas.image_types import ImageRequest, ImagesResponse
@@ -33,7 +35,12 @@ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?shee
 async def create_task(request: VideoRequest, api_key: Optional[str] = None):
     api_key = api_key or await get_next_token_for_polling(FEISHU_URL, from_redis=False)
 
-
+    proxy = await get_one_proxy()
+    client = ZhipuAI(
+        api_key=api_key,
+        http_client=httpx.Client(proxy=proxy)
+
+    )  # fill in your own API key
     response = client.videos.generations(
         model=request.model,
         prompt=request.prompt,
@@ -102,15 +109,15 @@ async def generate(request: ImageRequest, n: int = 30):  # dalle3-compatible
 
 # VideoResult
 if __name__ == '__main__':
-    api_key = "
+    api_key = "f0a6177640364388a6d5d82428ee93b7.2woWPO0QdF3aWbVZ"
 
-    ""
+    # api_key = "c98aa404b0224690b211c5d1e420db2c.qGaByuJATne08QUx"
     # api_key = "7d10426c06afa81e8d7401d97781249c.DbqlSsicRtaUdKXI"  # new account
     # api_key = "e21bd630f681c4d90b390cd609720483.WSFVgA3Kk1wNCX0mN"
 
     request = VideoRequest(
         # model='cogvideox-flash',
-        model='cogvideox'
+        model='cogvideox-3'
 
     )
     r = arun(create_task(request, api_key=api_key))
@@ -119,4 +126,3 @@ if __name__ == '__main__':
     # request = ImageRequest(prompt="https://oss.ffire.cc/files/kling_watermark.png 让这个女人笑起来")
     #
     # arun(generate(request, n=30))
-
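The net effect of these hunks is that CogVideoX task creation now goes out through a rotating proxy. A sketch of the wiring, assuming httpx ≥ 0.26 for the `proxy=` keyword; the proxy URL and api_key below are placeholders, while the `http_client=` parameter and `videos.generations` call come from the diff itself:

```python
# Route the ZhipuAI SDK through an HTTP proxy via a custom httpx client.
import httpx
from zhipuai import ZhipuAI

proxy = "http://user:pass@proxy.example.com:8080"  # placeholder; the real code
                                                   # fetches one via get_one_proxy()

client = ZhipuAI(
    api_key="your-api-key",  # placeholder
    http_client=httpx.Client(proxy=proxy),
)

response = client.videos.generations(
    model="cogvideox-3",
    prompt="a corgi running on the beach",
)
print(response)
```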
meutils/apis/chatglm/zai.py ADDED

@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : z.py
+# @Time : 2025/8/19 08:32
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description :
+
+
+from meutils.pipe import *
+from meutils.caches import rcache
+from meutils.db.redis_db import redis_aclient
+
+from openai import AsyncOpenAI
+from meutils.llm.openai_utils import to_openai_params, create_chat_completion_chunk
+
+from meutils.schemas.openai_types import CompletionRequest, chat_completion_chunk, chat_completion
+
+from meutils.decorators.retry import retrying
+from meutils.config_utils.lark_utils import get_next_token_for_polling
+
+from fake_useragent import UserAgent
+
+ua = UserAgent()
+
+BASE_URL = "https://chat.z.ai/api"
+FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=9VvErr"
+
+
+class Completions(object):
+    def __init__(self, api_key: Optional[str] = None):
+        self.api_key = api_key
+
+    async def create(self, request: CompletionRequest, token: Optional[str] = None):
+        token = token or await get_next_token_for_polling(FEISHU_URL)
+
+        chat_id = str(uuid.uuid4())
+        payload = {
+            "id": chat_id,
+            "chat_id": chat_id,
+            "model": "0727-360B-API",
+
+            "stream": True,
+
+            "params": {},
+            "features": {
+                "image_generation": False,
+                "web_search": False,
+                "auto_web_search": False,
+                "preview_mode": False,
+                "flags": [],
+                "features": [
+                    {
+                        "type": "mcp",
+                        "server": "vibe-coding",
+                        "status": "hidden"
+                    },
+                    {
+                        "type": "mcp",
+                        "server": "ppt-maker",
+                        "status": "hidden"
+                    },
+                    {
+                        "type": "mcp",
+                        "server": "image-search",
+                        "status": "hidden"
+                    }
+                ],
+                "enable_thinking": request.enable_thinking or False
+            },
+
+            "background_tasks": {
+                "title_generation": False,
+                "tags_generation": False
+            }
+        }
+
+        payload = {**request.model_dump(), **payload}
+
+        data = to_openai_params(payload)
+
+        # todo: proxy
+        client = AsyncOpenAI(base_url=BASE_URL, api_key=token, default_headers={"X-FE-Version": "prod-fe-1.0.69"})
+        response = await client.chat.completions.create(**data)
+        response = self.do_response(response, request.stream)
+
+        # async for i in response:
+        #     logger.debug(i)
+
+        return response
+
+    async def do_response(self, response, stream: bool):
+        usage = None
+        nostream_content = ""
+        nostream_reasoning_content = ""
+        chat_completion_chunk.model = "glm-4.5"
+        async for i in response:
+            # print(i)
+
+            delta_content = (
+                    i.data.get("delta_content", "").split(' ')[-1]
+                    or i.data.get("edit_content", "").split("\n")[-1]
+            )
+            if i.data.get("phase") == "thinking":
+                nostream_reasoning_content += delta_content
+                chat_completion_chunk.choices[0].delta.reasoning_content = delta_content
+
+            elif i.data.get("phase") == "answer":
+                nostream_content += delta_content
+                chat_completion_chunk.choices[0].delta.content = delta_content
+
+            else:
+                logger.debug(bjson(i))
+
+            if stream:
+                yield chat_completion_chunk
+
+            usage = usage or i.data.get("usage", "")
+
+        if not stream:
+            chat_completion.choices[0].message.content = nostream_content
+            chat_completion.choices[0].message.reasoning_content = nostream_reasoning_content
+            chat_completion.usage = usage
+            chat_completion.model = "glm-4.5"
+            yield chat_completion
+
+
+if __name__ == '__main__':
+    token = "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6ImI0YThkMTI5LWY2YzgtNDM5Mi1iYzlhLWEyNjM1Nzg0ZDM5MyIsImVtYWlsIjoiemJqZ2NlZ2NsbkB0aXRrLnVrIn0.cME4z8rip8Y6mQ0q_JEoY6ywPk_7ud2BsyFHyPRhFhtzEl_uLcQEMNlop7hM_fTy0S5pS8qdLK5y7iA1it0n7g"
+
+    request = CompletionRequest(
+        model="glm-4.5",
+        messages=[
+            {
+                "role": "system",
+                "content": "你是gpt",
+
+            },
+            {
+                "role": "user",
+                "content": [{"type": "text", "text": "周杰伦"}],
+                # "content": "你是谁",
+
+            }
+        ],
+        stream=True,
+
+        enable_thinking=True
+
+    )
+
+    arun(Completions().create(request, token))
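The new zai.py maps chat.z.ai's phased SSE frames ("thinking" / "answer") onto OpenAI-style chunks. Since `create()` hands back the async generator built by `do_response()`, a caller consumes it with `async for`; a minimal driver, assuming a meutils install and a valid chat.z.ai token:

```python
# Sketch driver for the new zai.py adapter; the token is a placeholder.
import asyncio

from meutils.apis.chatglm.zai import Completions
from meutils.schemas.openai_types import CompletionRequest


async def main() -> None:
    request = CompletionRequest(
        model="glm-4.5",
        messages=[{"role": "user", "content": "hello"}],
        stream=True,
        enable_thinking=False,
    )
    # create() returns an async generator of chat-completion chunks.
    async for chunk in await Completions().create(request, token="your-z.ai-token"):
        print(chunk)


asyncio.run(main())
```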
meutils/apis/images/generations.py CHANGED

@@ -18,6 +18,7 @@ from meutils.apis.fal.images import generate as fal_generate
 
 from meutils.apis.gitee.image_to_3d import generate as image_to_3d_generate
 from meutils.apis.gitee.openai_images import generate as gitee_images_generate
+from meutils.apis.qwen.chat import Completions as QwenCompletions
 
 
 async def generate(
@@ -38,6 +39,11 @@ async def generate(
     if request.model in {"Qwen-Image", "FLUX_1-Krea-dev"}:
         return await gitee_images_generate(request, api_key)
 
+    if request.model.startswith("qwen-image"):
+        return await QwenCompletions(api_key=api_key).generate(request)
+
+
+
 
     # "flux.1-krea-dev"
 
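The dispatch added here is ordered: exact ids go to the Gitee backend first, then any model prefixed with `qwen-image` falls through to the new Qwen adapter. A self-contained toy version of that rule, with stand-in backend names:

```python
# Toy illustration of the prefix-based routing; the returned strings stand
# in for the real meutils handlers.
def route(model: str) -> str:
    if model in {"Qwen-Image", "FLUX_1-Krea-dev"}:
        return "gitee_images_generate"
    if model.startswith("qwen-image"):  # also matches variants like "qwen-image-edit"
        return "QwenCompletions.generate"
    return "fallthrough"

for m in ["Qwen-Image", "qwen-image", "qwen-image-edit", "flux-dev"]:
    print(m, "->", route(m))
```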
meutils/apis/models.py CHANGED

@@ -38,10 +38,15 @@ def make_billing_model(model: str, request: dict):
 
     elif _model.startswith(("minimax")):
         # MiniMax-Hailuo-02 T2V-01-Director I2V-01-Director S2V-01 I2V-01-live I2V-01 T2V-01
+
         duration = request.get("duration", 6)
         resolution = request.get("resolution", "720P")
         model = request.get("model", "").lower()
 
+        if "01" in model:
+            duration = 6
+            resolution = "720P"
+
         if model.startswith("minimax"):  # 02
             resolution = request.get("resolution", "768P")
 
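The new branch pins the legacy MiniMax "01" video models to the 6s/720P billing tier regardless of the requested duration or resolution. A self-contained illustration of the rule; the suffix format in the return value is an assumption for demonstration, not the exact string meutils builds:

```python
# Illustration of the billing clamp for MiniMax "01" models; the
# "{model}_{duration}s_{resolution}" output format is assumed.
def minimax_billing(request: dict) -> str:
    duration = request.get("duration", 6)
    resolution = request.get("resolution", "720P")
    model = request.get("model", "").lower()

    if "01" in model:  # e.g. T2V-01, I2V-01-live: fixed billing tier
        duration = 6
        resolution = "720P"

    if model.startswith("minimax"):  # e.g. MiniMax-Hailuo-02
        resolution = request.get("resolution", "768P")

    return f"{model}_{duration}s_{resolution.lower()}"


print(minimax_billing({"model": "T2V-01-Director", "duration": 10, "resolution": "1080P"}))
# t2v-01-director_6s_720p
```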
meutils/apis/oneapi/channel.py CHANGED

@@ -258,11 +258,11 @@ if __name__ == '__main__':
     # # arun(delete_channel(range(10000, 20000)))
     key = "KEY"
     request = ChannelInfo(name='', key=key)
-    request = ChannelInfo(id=
+    request = ChannelInfo(id=21223, key=key, used_quota=0.001)
 
-
-
-    arun(exist_channel(request, base_url=base_url))
+    arun(create_or_update_channel(request, base_url=base_url))
+    #
+    # arun(exist_channel(request, base_url=base_url))
 
     """
     UPSTREAM_BASE_URL=https://api.ffire.cc
meutils/apis/qwen/chat.py ADDED

@@ -0,0 +1,233 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Project : AI. @by PyCharm
+# @File : chat
+# @Time : 2025/8/19 13:22
+# @Author : betterme
+# @WeChat : meutils
+# @Software : PyCharm
+# @Description : qwen-image
+
+
+from openai import AsyncOpenAI, OpenAI, AsyncStream
+
+from meutils.pipe import *
+from meutils.decorators.retry import retrying
+from meutils.oss.ali_oss import qwenai_upload
+from meutils.io.files_utils import to_bytes, guess_mime_type, to_url
+from meutils.caches import rcache
+
+from meutils.llm.openai_utils import to_openai_params, create_chat_completion_chunk, token_encoder
+
+from meutils.config_utils.lark_utils import get_next_token_for_polling
+from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, CompletionUsage, \
+    ChatCompletion, Choice, ChatCompletionMessage
+from meutils.schemas.image_types import ImageRequest, ImagesResponse
+
+FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=PP1PGr"
+
+base_url = "https://chat.qwen.ai/api/v2"
+DEFAUL_MODEL = "qwen3-235b-a22b"
+from fake_useragent import UserAgent
+
+ua = UserAgent()
+
+thinking_budget_mapping = {
+    "low": 1000,
+    "medium": 8000,
+    "high": 24000
+}
+
+COOKIE = """
+cna=KP9DIEqqyjUCATrw/+LjJV8F; _bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb; cnaui=310cbdaf-3754-461c-a3ff-9ec8005329c9; aui=310cbdaf-3754-461c-a3ff-9ec8005329c9; sca=43897cb0; _gcl_au=1.1.106229673.1748312382.56762171.1748482542.1748482541; xlly_s=1; x-ap=ap-southeast-1; acw_tc=0a03e53917509898782217414e520e5edfcdef667dcbd83b767c0ce464fad4; token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTM1ODE4ODV9.Npy24ubI717JmdSWMrodWSvVRHENgbJ7Knd-Yf158YE; atpsida=705b922fe336ee0d63fcc329_1750989888_2; SERVERID=e8c2af088c314df080fffe7d0976a96b|1750989892|1750910540; tfstk=gGtsWsqG4IKUeosYhNDUAMIBJRIbcvoz6-6vEKEaHGIOG-O2eZBabAYXRIR16hSOMpQpNtDMbtpTlWd2wNEAWA4XAOWy0FJtS6Ef3IDMbiQvps65XZYNg15fcKASLbor4dvGmGlra0WjM37NqSBAMS5d9TSfBJ35KivGmihEsEHyxdAMR0lwBiHCvt6uMiBYDMHC3TXOD1QY9yBR9iIAktIdpOX0DlCYWv9dtOsAMIQtdMChHfD7Ftg1sdMwtHJ00Jm2p6ZYDH6Ki1p6F9XBAwQOwwCQD9-CCN1JBhJB9QBXy3_MwXzN6UTkNTRZvlOWBCTRyhFKOivePI6WXYU5GCvpbwKt3zXhmFLRXnG76ppJBeLJBXzCdepwAw--No_MJCYllnlEqG8yUnbJXcNlTaXXNGLI9lOR4urPNGl0lJ_uc91rdva0oJN5AmdFjVAhW9X18vMQ6EbOK96ndva0oNBhCOMId5Lc.; isg=BNfX7gH7c3OJX_gfCBykQ2rtZk0hHKt-YCofVCkEq6YJWPSaPe8Dz9o-uvjGsIP2; ssxmod_itna=iqGxRDuQqWqxgDUxeKYI5q=xBDeMDWK07DzxC5750CDmxjKidKDUGQq7qdOamuu9XYkRGGm01DBL4qbDnqD80DQeDvYxk0K4MUPhDwpaW8YRw3Mz7GGb48aIzZGzY=0DgSdfOLpmxbD884rDYoDCqDSDxD99OdD4+3Dt4DIDAYDDxDWCeDBBWriDGpdhmbQVqmqvi2dxi3i3mPiDit8xi5bZendVL4zvDDlKPGf3WPt5xGnD0jmxhpdx038aoODzLiDbxEY698DtkHqPOK=MlTiRUXxAkDb9RG=Y2U3iA4G3DhkCXU3QBhxCqM2eeQmkeNzCwkjw/006DDAY2DlqTWweL04MKBeHhY5om5NUwYHuFiieQ0=/R=9iO9xTBhND4KF4dvyqz0/toqlqlzGDD; ssxmod_itna2=iqGxRDuQqWqxgDUxeKYI5q=xBDeMDWK07DzxC5750CDmxjKidKDUGQq7qdOamuu9XYkRGGmibDG85+YNY=exGa3Y64u5DBwiW7r++DxFqCdl=l77NQwckyAaCG64hkCOjO1pkcMRBdqj70N7nk=e94KEQYUxlf+2Dw=ViA+XKDde0uGS+eXgFkQqzYWe0Dd4oGbUj8L4QY4og345X2DjKDNOfQRgfeIKVRFQjqR098dBUrQsXBNQZcG1oBFAp4xkLYHl+W3OQW9ybPF4sML3t1tPX2T4DmCqKL+jN1XX94xpyA6k9+sgyBFY4zXOq7dHOuO3Gd3lidwdrk=8dNrOdrYQo33fobVS=MRF7nNQBC5d3kBbYdwtoxNBKmBiXoTfOTzOp3MT=ODXhxfO16Tta4vSW=ubtkEGgeQ/gKOwsVjmKDEY0NZ+ee7xlitvWmBbtk7ma7x1PinxtbitdadtYQOqG5AFEZbFxiSE6rDky7jiatQ0Fe7z6uDmYx4z5MGxMA5iDY7DtSLfNUYxU44D
+""".strip()
+
+
+class Completions(object):
+    def __init__(self, api_key: Optional[str] = None):
+        self.api_key = api_key
+
+    async def generate(self, request: ImageRequest, **kwargs):
+
+        if request.image and not request.image.startswith("http"):
+            request.image = await to_url(request.image, content_type="image/png")
+
+        _ = CompletionRequest(
+            model="qwen-image",
+            stream=True,
+            messages=[
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": request.prompt},
+
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": request.image
+                            }
+                        }
+
+                    ],
+
+                }
+            ],
+            size=request.aspect_ratio
+        )
+
+        async for chunk in await self.create(_):
+            logger.debug(chunk)
+            if chunk.choices and (url := chunk.choices[0].delta.content):
+                return ImagesResponse(data=[{"url": url}])
+
+        raise Exception("qwen-image 生成失败, 请重试")
+
+    async def create(self, request: CompletionRequest, cookie: Optional[str] = None):
+        api_key = self.api_key or await get_next_token_for_polling(FEISHU_URL)
+
+        self.client = AsyncOpenAI(
+            base_url=base_url,
+            api_key=api_key,
+            default_headers={
+                'User-Agent': ua.random,
+                'Cookie': cookie or COOKIE
+            }
+        )
+
+        chat_id = await self.create_new_chat()
+
+        # request.last_user_content
+        # request.last_urls.get("image_url", [])
+
+        payload = {
+            "chat_id": chat_id,
+            "stream": request.stream,
+            "incremental_output": True,
+            "chat_mode": "normal",
+            "model": "qwen3-235b-a22b",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": request.last_user_content,
+
+                    "user_action": "chat",
+                    "files": [],
+                    "models": [
+                        "qwen3-235b-a22b"
+                    ],
+                    "chat_type": "t2t",
+                    # "chat_type": "t2i",
+                    # "chat_type": "image_edit",
+
+                    "feature_config": {
+                        "thinking_enabled": request.enable_thinking or False,
+                        "output_schema": "phase"
+                    },
+                    "extra": {
+                        "meta": {
+                            "subChatType": "t2t"
+                        }
+                    }
+                }
+            ],
+            "size": request.size if hasattr(request, "size") else "1:1"
+        }
+
+        if request.model.startswith("qwen-image"):
+            payload["messages"][0]["chat_type"] = "t2i"
+            if image_urls := request.last_urls.get("image_url"):
+                payload["messages"][0]["chat_type"] = "image_edit"
+                payload["messages"][0]["files"] = [
+                    {
+                        "type": "image",
+                        "name": "example.png",
+                        "file_type": "image",
+                        "showType": "image",
+                        "file_class": "vision",
+                        "url": image_urls[-1]  # todo: Alibaba object storage (OSS)
+                    }
+                ]
+
+        data = to_openai_params(payload)
+        logger.debug(bjson(data))
+        response = await self.client.chat.completions.create(**data, extra_query={"chat_id": chat_id})
+        # response = self.do_response(response)
+
+        if isinstance(response, AsyncStream):  # image
+            # async def gen():
+            #     async for chunk in response:
+            #         if url := chunk.choices[0].delta.content:
+            #             yield f"[]{url}"
+
+            return response
+
+        else:
+            logger.debug(response)
+
+            if hasattr(response, "data") and (choices := response.data.get("choices")):
+                response = response.model_construct(choices=choices)
+                logger.debug(response)
+
+            prompt_tokens = len(token_encoder.encode(str(request.messages)))
+            completion_tokens = len(token_encoder.encode(str(response.choices[0].message.content)))
+            usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": prompt_tokens + completion_tokens
+            }
+            response.usage = usage
+            return response
+
+    async def create_new_chat(self):
+        payload = {
+            "title": "新建对话",
+            "models": [DEFAUL_MODEL],
+            "chat_mode": "normal",
+            "chat_type": "t2i",
+            "timestamp": time.time() * 1000 // 1
+        }
+        resp = await self.client.post('/chats/new', body=payload, cast_to=object)
+        logger.debug(resp)
+        return resp['data']['id']
+
+
+if __name__ == '__main__':
+    token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTgxNTc1Njh9.eihH3NVrzJCg9bdWb9mim9rGKTLKn1a66kW2Cqc0uPM"
+    request = CompletionRequest(
+        # model="qwen3-235b-a22b",
+        model="qwen-image",
+
+        messages=[
+
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "带个墨镜"},
+
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": "https://oss.ffire.cc/files/kling_watermark.png"
+                        }
+                    }
+
+                ],
+
+            }
+        ],
+        stream=True,
+
+        enable_thinking=True,
+
+        size="16:9"
+
+    )
+
+    # arun(Completions(api_key=token).create(request))
+
+    request = ImageRequest(
+        model="qwen-image",
+        prompt="带个墨镜",
+        image="https://oss.ffire.cc/files/kling_watermark.png"
+    )
+    arun(Completions(api_key=token).generate(request))