MeUtils 2025.8.7.20.17.40__py3-none-any.whl → 2025.8.11.16.30.21__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registries.
@@ -13,6 +13,7 @@
13
13
  AttributeError: 'str' object has no attribute 'choices'
14
14
 
15
15
  """
16
+ import time
16
17
 
17
18
  from openai import AsyncOpenAI
18
19
 
@@ -31,6 +32,7 @@ from meutils.schemas.openai_types import chat_completion, chat_completion_chunk,
31
32
  FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=PP1PGr"
32
33
 
33
34
  base_url = "https://chat.qwen.ai/api"
35
+ DEFAUL_MODEL = "qwen3-235b-a22b"
34
36
 
35
37
  from fake_useragent import UserAgent
36
38
 
@@ -47,6 +49,28 @@ cna=KP9DIEqqyjUCATrw/+LjJV8F; _bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb; cnaui=310cbd
47
49
  """.strip()
48
50
 
49
51
 
52
+ @retrying()
53
+ async def create_new_chat(api_key, cookie: Optional[str] = None):
54
+ qwen_client = AsyncOpenAI(
55
+ base_url=base_url,
56
+ api_key=api_key,
57
+ default_headers={
58
+ 'User-Agent': ua.random,
59
+ 'Cookie': cookie or COOKIE
60
+ }
61
+ )
62
+ payload = {
63
+ "title": "新建对话",
64
+ "models": [DEFAUL_MODEL],
65
+ "chat_mode": "normal",
66
+ "chat_type": "t2i",
67
+ "timestamp": time.time() * 1000 // 1
68
+ }
69
+ resp = await qwen_client.post('/v2/chats/new', body=payload, cast_to=object)
70
+ logger.debug(resp)
71
+ return resp['data']['id']
72
+
73
+
50
74
  @retrying()
51
75
  async def to_file(file, api_key, cookie: Optional[str] = None):
52
76
  qwen_client = AsyncOpenAI(
@@ -65,8 +89,6 @@ async def to_file(file, api_key, cookie: Optional[str] = None):
65
89
  logger.debug(file_object)
66
90
  return file_object
67
91
 
68
- # todo
69
- # oss
70
92
 
71
93
  async def create(request: CompletionRequest, token: Optional[str] = None, cookie: Optional[str] = None):
72
94
  cookie = cookie or COOKIE
@@ -78,25 +100,38 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
78
100
 
79
101
  logger.debug(token)
80
102
 
103
+ default_query = None
104
+ if 'image' in request.model:
105
+ chat_id = await create_new_chat(token, cookie)
106
+ default_query = {'chat_id': chat_id}
107
+
81
108
  client = AsyncOpenAI(
82
109
  base_url=base_url,
83
110
  api_key=token,
84
111
  default_headers={
85
112
  'User-Agent': ua.random,
86
113
  'Cookie': cookie,
87
- }
114
+ },
115
+
116
+ default_query=default_query
88
117
  )
89
118
  # qwen结构
90
119
  model = request.model.lower()
91
120
  if any(i in model for i in ("research",)): # 遇到错误 任意切换
92
- request.model = np.random.choice({""})
121
+ request.model = DEFAUL_MODEL
93
122
  request.messages[-1]['chat_type'] = "deep_research"
94
123
 
95
- # request.messages["extra"] = {
96
- # "meta": {
97
- # "subChatType": "deep_thinking"
98
- # }
99
- # }
124
+ elif any(i in model for i in ("image",)):
125
+ request.model = DEFAUL_MODEL
126
+ request.chat_id = default_query['chat_id']
127
+ request.size = "1:1"
128
+
129
+ request.messages[-1]['chat_type'] = "t2i"
130
+ request.messages[-1]['feature_config'] = {
131
+ "thinking_enabled": False,
132
+ "output_schema": "phase"
133
+ }
134
+
100
135
 
101
136
  elif any(i in model for i in ("search",)):
102
137
  request.model = "qwen-max-latest"
@@ -105,7 +140,7 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
105
140
  # 混合推理
106
141
  if (request.reasoning_effort
107
142
  or request.last_user_content.startswith("/think")
108
- or hasattr(request, "enable_thinking")
143
+ or request.enable_thinking
109
144
  or hasattr(request, "thinking_budget")
110
145
  ):
111
146
  feature_config = {"thinking_enabled": True, "output_schema": "phase"}
@@ -168,6 +203,8 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
168
203
  request.incremental_output = True # 增量输出
169
204
  data = to_openai_params(request)
170
205
 
206
+ logger.debug(data)
207
+
171
208
  # 流式转非流
172
209
  data['stream'] = True
173
210
  chunks = await client.chat.completions.create(**data)
@@ -226,7 +263,7 @@ if __name__ == '__main__':
226
263
  user_content = [
227
264
  {
228
265
  "type": "text",
229
- "text": "一句话总结"
266
+ "text": "主体文字'诸事皆顺',超粗笔画、流畅飘逸、有飞白效果的狂野奔放草书字体,鎏金质感且有熔金流动感和泼溅金箔效果,黑色带细微噪点肌理背景,英文'GOOD LUCK'浅金色或灰白色,有淡淡的道家符文点缀,书法字体海报场景,传统书法与现代设计融合风格,特写,神秘奢华充满能量,焦点清晰,对比强烈"
230
267
  },
231
268
  # {
232
269
  # "type": "image_url",
@@ -236,7 +273,15 @@ if __name__ == '__main__':
236
273
  # }
237
274
  ]
238
275
 
239
- user_content = "1+1"
276
+ user_content = "主体文字'诸事皆顺',超粗笔画、流畅飘逸、有飞白效果的狂野奔放草书字体,鎏金质感且有熔金流动感和泼溅金箔效果,黑色带细微噪点肌理背景,英文'GOOD LUCK'浅金色或灰白色,有淡淡的道家符文点缀,书法字体海报场景,传统书法与现代设计融合风格,特写,神秘奢华充满能量,焦点清晰,对比强烈"
277
+ # {
278
+ # "type": "image_url",
279
+ # "image_url": {
280
+ # "url": "https://fyb-pc-static.cdn.bcebos.com/static/asset/homepage@2x_daaf4f0f6cf971ed6d9329b30afdf438.png"
281
+ # }
282
+ # }
283
+
284
+ # user_content = "1+1"
240
285
  # user_content = "/think 1+1"
241
286
 
242
287
  # user_content = [
@@ -270,11 +315,13 @@ if __name__ == '__main__':
270
315
  # ]
271
316
 
272
317
  request = CompletionRequest(
318
+ # model="qwen3-235b-a22b",
273
319
  # model="qwen-turbo-2024-11-01",
274
320
  # model="qwen-max-latest",
275
321
  # model="qvq-max-2025-03-25",
276
322
  # model="qvq-72b-preview-0310",
277
- model="qwen2.5-omni-7b",
323
+ # model="qwen2.5-omni-7b",
324
+ model="qwen-image",
278
325
  # model="qwen-plus",
279
326
 
280
327
  # model="qwen-max-latest-search",
@@ -361,15 +408,17 @@ if __name__ == '__main__':
361
408
  # stream=True,
362
409
 
363
410
  # reasoning_effort="low",
364
- enable_thinking=True,
365
- thinking_budget=1024,
411
+ # enable_thinking=True,
412
+ # thinking_budget=1024,
366
413
  # stream_options={"include_usage": True},
367
414
 
368
415
  )
369
416
  token = None
370
417
 
371
- # token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImV4cCI6MTc0ODQ3OTE0M30.oAIE1K0XA0YYqlxB8Su-u0UJbY_BBZa4_tvZpFJKxGY"
418
+ token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTc0ODczMDd9.7TQ9NicXYxghzI7EP3cPMFqa5j-09Sz1B9s3SnKZvkE"
372
419
 
373
420
  arun(create(request, token))
374
421
 
375
422
  # arun(to_file("https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20250211/tixcef/cherry.wav", token))
423
+
424
+ # arun(create_new_chat(token))
@@ -21,6 +21,8 @@ from contextlib import asynccontextmanager
21
21
  from meutils.pipe import *
22
22
  from meutils.llm.clients import AsyncOpenAI
23
23
  from meutils.apis.utils import make_request
24
+ from meutils.apis.oneapi.user import get_user_money
25
+ from fastapi import status, HTTPException
24
26
 
25
27
  base_url = "https://api.chatfire.cn/flux/v1"
26
28
 
@@ -200,15 +202,27 @@ async def billing_flow_for_async_task(
200
202
  n: float = 1,
201
203
  api_key: Optional[str] = None
202
204
  ):
203
- if n: # 计费
204
- await billing_for_async_task(model, task_id=task_id, n=n, api_key=api_key)
205
+ if n and (user_money := await get_user_money(api_key)): # 计费
206
+
207
+ # 判断
208
+ if user_money < 1:
209
+ raise HTTPException(status_code=status.HTTP_402_PAYMENT_REQUIRED, detail="余额不足或API-KEY限额")
210
+
211
+ # 执行
205
212
  yield
206
213
 
214
+ # 计费
215
+ await billing_for_async_task(model, task_id=task_id, n=n, api_key=api_key)
216
+
207
217
  else: # 不计费
208
218
  a = yield
209
219
  logger.debug(a)
210
220
 
211
221
 
222
+ # # 检查余额
223
+ # if user_money := await get_user_money(api_key):
224
+ # if user_money < 1:
225
+ # raise HTTPException(status_code=status.HTTP_402_PAYMENT_REQUIRED, detail="余额不足或API-KEY限额")
212
226
  @asynccontextmanager
213
227
  async def billing_flow_for_tokens(
214
228
  model: str = "usage-chat",
@@ -81,6 +81,7 @@ FAL_MODELS = {
81
81
 
82
82
  # veo
83
83
  "fal-ai/veo3/fast": 3.2 * FAL_, # 2 3.2
84
+ "fal-ai/veo3/fast/image-to-video": 3.2 * FAL_,
84
85
 
85
86
  # seedance
86
87
  "fal-ai/bytedance/seedance/v1/lite/text-to-video": 1,
@@ -141,7 +142,7 @@ MODEL_PRICE = {
141
142
  "gemini-2.5-pro-video": 0.1,
142
143
 
143
144
  # rix
144
- "kling_image": 0.05,
145
+ "kling_image": 0.025,
145
146
  "kling_image_expand": 0.3,
146
147
  "kling_virtual_try_on": 1,
147
148
  "kling_effects": 1,
@@ -167,6 +168,7 @@ MODEL_PRICE = {
167
168
  # 火山
168
169
  "jimeng-video-3.0": 0.5,
169
170
  "doubao-seedream-3-0-t2i-250415": 0.1,
171
+ "doubao-seededit-3-0-i2i-250628": 0.1,
170
172
 
171
173
  "doubao-seedance-1-0-pro-250528": 0.5, # 480 1080
172
174
  "api-doubao-seedance-1-0-pro-250528-1080p": 2, # 480 1080
@@ -410,6 +412,7 @@ MODEL_PRICE = {
410
412
  "gpt-4o-image": 0.05,
411
413
  "sora-image": 0.1,
412
414
  "gpt-image-1": 0.1,
415
+ "gpt-5-thinking": 0.05,
413
416
 
414
417
  "gpt-4-gizmo-*": 0.1,
415
418
  "advanced-voice": 1,
@@ -965,6 +968,7 @@ MODEL_RATIO = {
965
968
  "claude-sonnet-4-20250514-thinking": 1.5,
966
969
  "claude-opus-4-20250514": 7.5,
967
970
  "claude-opus-4-20250514-thinking": 7.5,
971
+ "claude-opus-4.1": 7.5,
968
972
 
969
973
  "deepclaude": 1.5,
970
974
  "deep-claude": 1.5,
@@ -1059,8 +1063,6 @@ MODEL_RATIO = {
1059
1063
  "gpt-4-1106-vision-preview": 5,
1060
1064
  "gpt-4-32k": 30,
1061
1065
  "gpt-4-32k-0613": 30,
1062
- "gpt-4-all": 15,
1063
- "gpt-4-gizmo-*": 15,
1064
1066
  "gpt-4-turbo": 5,
1065
1067
  "gpt-4-turbo-2024-04-09": 5,
1066
1068
  "gpt-4-turbo-preview": 5,
@@ -1094,6 +1096,14 @@ MODEL_RATIO = {
1094
1096
  "gpt-oss-20b": 0.025,
1095
1097
  "gpt-oss-120b": 0.05,
1096
1098
 
1099
+ "gpt-5": 0.625,
1100
+ "gpt-5-2025-08-07": 0.625,
1101
+ "gpt-5-chat-latest": 0.625,
1102
+ "gpt-5-mini": 0.125,
1103
+ "gpt-5-mini-2025-08-07": 0.125,
1104
+ "gpt-5-nano": 0.025,
1105
+ "gpt-5-nano-2025-08-07": 0.025,
1106
+
1097
1107
  "o1": 7.5,
1098
1108
  "o1-2024-12-17": 7.5,
1099
1109
 
@@ -1217,9 +1227,6 @@ COMPLETION_RATIO = {
1217
1227
  "grok-3-mini-fast-beta": 4 / 0.6,
1218
1228
  "grok-4": 5,
1219
1229
 
1220
- "gpt-4-all": 4,
1221
- "gpt-4-gizmo-*": 4,
1222
- "gpt-4o-all": 4,
1223
1230
  "gpt-4.5-preview-2025-02-27": 2,
1224
1231
 
1225
1232
  "o1-mini": 4,
@@ -1254,6 +1261,14 @@ COMPLETION_RATIO = {
1254
1261
  "gpt-oss-20b": 4,
1255
1262
  "gpt-oss-120b": 4,
1256
1263
 
1264
+ "gpt-5": 8,
1265
+ "gpt-5-2025-08-07": 8,
1266
+ "gpt-5-chat-latest": 8,
1267
+ "gpt-5-mini": 8,
1268
+ "gpt-5-mini-2025-08-07": 8,
1269
+ "gpt-5-nano": 4,
1270
+ "gpt-5-nano-2025-08-07": 4,
1271
+
1257
1272
  # claude
1258
1273
  "claude-3-5-haiku-20241022": 5,
1259
1274
  "anthropic/claude-3-5-haiku-20241022:beta": 5,
@@ -1273,6 +1288,7 @@ COMPLETION_RATIO = {
1273
1288
  "claude-sonnet-4-20250514-thinking": 5,
1274
1289
  "claude-opus-4-20250514": 5,
1275
1290
  "claude-opus-4-20250514-thinking": 5,
1291
+ "claude-opus-4.1": 5,
1276
1292
 
1277
1293
  "llama-3.1-70b-instruct": 2,
1278
1294
  "meta-llama/Meta-Llama-3.1-70B-Instruct": 2,
@@ -1778,3 +1794,6 @@ if __name__ == '__main__':
1778
1794
 
1779
1795
  print("FAL按次")
1780
1796
  print(','.join(FAL_MODELS)) # fal 按次
1797
+
1798
+ print('\n\n')
1799
+ print([k for k in MODEL_RATIO if k.startswith(('gpt-5'))] | xjoin(","))