MeUtils 2025.8.7.20.31.52__py3-none-any.whl → 2025.8.12.17.29.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -13,6 +13,7 @@
 AttributeError: 'str' object has no attribute 'choices'
 
 """
+import time
 
 from openai import AsyncOpenAI
 
@@ -31,6 +32,7 @@ from meutils.schemas.openai_types import chat_completion, chat_completion_chunk,
 FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=PP1PGr"
 
 base_url = "https://chat.qwen.ai/api"
+DEFAUL_MODEL = "qwen3-235b-a22b"
 
 from fake_useragent import UserAgent
 
@@ -47,6 +49,28 @@ cna=KP9DIEqqyjUCATrw/+LjJV8F; _bl_uid=LXmp28z7dwezpmyejeXL9wh6U1Rb; cnaui=310cbd
 """.strip()
 
 
+@retrying()
+async def create_new_chat(api_key, cookie: Optional[str] = None):
+    qwen_client = AsyncOpenAI(
+        base_url=base_url,
+        api_key=api_key,
+        default_headers={
+            'User-Agent': ua.random,
+            'Cookie': cookie or COOKIE
+        }
+    )
+    payload = {
+        "title": "新建对话",
+        "models": [DEFAUL_MODEL],
+        "chat_mode": "normal",
+        "chat_type": "t2i",
+        "timestamp": time.time() * 1000 // 1
+    }
+    resp = await qwen_client.post('/v2/chats/new', body=payload, cast_to=object)
+    logger.debug(resp)
+    return resp['data']['id']
+
+
 @retrying()
 async def to_file(file, api_key, cookie: Optional[str] = None):
     qwen_client = AsyncOpenAI(
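The new create_new_chat helper opens a fresh Qwen web chat (POST /v2/chats/new) flagged for text-to-image ("chat_type": "t2i") and returns its id; the create() changes further down reuse that id as a chat_id query parameter. A minimal usage sketch, not part of the diff, assuming a valid Qwen web token:

    import asyncio

    async def demo(token):
        # create_new_chat returns resp['data']['id'] from POST /v2/chats/new
        chat_id = await create_new_chat(token)
        print(chat_id)  # later passed as ?chat_id=... on the image completion call

    # asyncio.run(demo("<qwen-web-jwt>"))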
@@ -65,8 +89,6 @@ async def to_file(file, api_key, cookie: Optional[str] = None):
     logger.debug(file_object)
     return file_object
 
-# todo
-# oss
 
 async def create(request: CompletionRequest, token: Optional[str] = None, cookie: Optional[str] = None):
     cookie = cookie or COOKIE
@@ -78,25 +100,38 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
 
     logger.debug(token)
 
+    default_query = None
+    if 'image' in request.model:
+        chat_id = await create_new_chat(token, cookie)
+        default_query = {'chat_id': chat_id}
+
     client = AsyncOpenAI(
         base_url=base_url,
         api_key=token,
         default_headers={
             'User-Agent': ua.random,
             'Cookie': cookie,
-        }
+        },
+
+        default_query=default_query
     )
     # qwen结构
     model = request.model.lower()
     if any(i in model for i in ("research",)):  # 遇到错误 任意切换
-        request.model = np.random.choice({""})
+        request.model = DEFAUL_MODEL
         request.messages[-1]['chat_type'] = "deep_research"
 
-        # request.messages["extra"] = {
-        #     "meta": {
-        #         "subChatType": "deep_thinking"
-        #     }
-        # }
+    elif any(i in model for i in ("image",)):
+        request.model = DEFAUL_MODEL
+        request.chat_id = default_query['chat_id']
+        request.size = "1:1"
+
+        request.messages[-1]['chat_type'] = "t2i"
+        request.messages[-1]['feature_config'] = {
+            "thinking_enabled": False,
+            "output_schema": "phase"
+        }
+
 
     elif any(i in model for i in ("search",)):
         request.model = "qwen-max-latest"
@@ -105,7 +140,7 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
     # 混合推理
     if (request.reasoning_effort
             or request.last_user_content.startswith("/think")
-            or hasattr(request, "enable_thinking")
+            or request.enable_thinking
             or hasattr(request, "thinking_budget")
     ):
         feature_config = {"thinking_enabled": True, "output_schema": "phase"}
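The hasattr-to-value change is the substantive fix here: assuming CompletionRequest declares enable_thinking as an optional field (it appears to be a pydantic model), hasattr() is True even when the field was never supplied, so the thinking branch fired on every request; testing the value itself restores the opt-in behaviour. A minimal illustration of the pitfall with a stand-in model (Req is hypothetical, not from the package):

    from typing import Optional
    from pydantic import BaseModel

    class Req(BaseModel):                      # stand-in for CompletionRequest
        enable_thinking: Optional[bool] = None

    r = Req()
    print(hasattr(r, "enable_thinking"))       # True: declared fields always exist
    print(bool(r.enable_thinking))             # False: unset, so thinking stays off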
@@ -168,6 +203,8 @@ async def create(request: CompletionRequest, token: Optional[str] = None, cookie
     request.incremental_output = True  # 增量输出
     data = to_openai_params(request)
 
+    logger.debug(data)
+
     # 流式转非流
     data['stream'] = True
     chunks = await client.chat.completions.create(**data)
@@ -226,7 +263,7 @@ if __name__ == '__main__':
     user_content = [
         {
             "type": "text",
-            "text": "一句话总结"
+            "text": "主体文字'诸事皆顺',超粗笔画、流畅飘逸、有飞白效果的狂野奔放草书字体,鎏金质感且有熔金流动感和泼溅金箔效果,黑色带细微噪点肌理背景,英文'GOOD LUCK'浅金色或灰白色,有淡淡的道家符文点缀,书法字体海报场景,传统书法与现代设计融合风格,特写,神秘奢华充满能量,焦点清晰,对比强烈"
         },
         # {
         #     "type": "image_url",
@@ -236,7 +273,15 @@ if __name__ == '__main__':
         # }
     ]
 
-    user_content = "1+1"
+    user_content = "主体文字'诸事皆顺',超粗笔画、流畅飘逸、有飞白效果的狂野奔放草书字体,鎏金质感且有熔金流动感和泼溅金箔效果,黑色带细微噪点肌理背景,英文'GOOD LUCK'浅金色或灰白色,有淡淡的道家符文点缀,书法字体海报场景,传统书法与现代设计融合风格,特写,神秘奢华充满能量,焦点清晰,对比强烈"
+    # {
+    #     "type": "image_url",
+    #     "image_url": {
+    #         "url": "https://fyb-pc-static.cdn.bcebos.com/static/asset/homepage@2x_daaf4f0f6cf971ed6d9329b30afdf438.png"
+    #     }
+    # }
+
+    # user_content = "1+1"
     # user_content = "/think 1+1"
 
     # user_content = [
@@ -270,11 +315,13 @@ if __name__ == '__main__':
     # ]
 
     request = CompletionRequest(
+        # model="qwen3-235b-a22b",
         # model="qwen-turbo-2024-11-01",
         # model="qwen-max-latest",
         # model="qvq-max-2025-03-25",
         # model="qvq-72b-preview-0310",
-        model="qwen2.5-omni-7b",
+        # model="qwen2.5-omni-7b",
+        model="qwen-image",
         # model="qwen-plus",
 
         # model="qwen-max-latest-search",
@@ -361,15 +408,17 @@ if __name__ == '__main__':
         # stream=True,
 
         # reasoning_effort="low",
-        enable_thinking=True,
-        thinking_budget=1024,
+        # enable_thinking=True,
+        # thinking_budget=1024,
         # stream_options={"include_usage": True},
 
     )
     token = None
 
-    # token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImV4cCI6MTc0ODQ3OTE0M30.oAIE1K0XA0YYqlxB8Su-u0UJbY_BBZa4_tvZpFJKxGY"
+    token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxMGNiZGFmLTM3NTQtNDYxYy1hM2ZmLTllYzgwMDUzMjljOSIsImxhc3RfcGFzc3dvcmRfY2hhbmdlIjoxNzUwNjYwODczLCJleHAiOjE3NTc0ODczMDd9.7TQ9NicXYxghzI7EP3cPMFqa5j-09Sz1B9s3SnKZvkE"
 
     arun(create(request, token))
 
     # arun(to_file("https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20250211/tixcef/cherry.wav", token))
+
+    # arun(create_new_chat(token))
@@ -220,3 +220,6 @@ if __name__ == '__main__':
     # if not arun(check_token(i)):
    #     print(i)
    #
+
+    # token="""_ga_RPMZTEBERQ=GS2.1.s1750752972$o9$g1$t1750752995$j37$l0$h0;_qimei_q36=;sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%223133038818%22%2C%22first_id%22%3A%22191b198c7b2d52-0fcca8d731cb9b8-18525637-2073600-191b198c7b31fd9%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_utm_medium%22%3A%22cpc%22%2C%22%24search_keyword_id%22%3A%22c8f5e19c000022920000000268799a56%22%2C%22%24search_keyword_id_type%22%3A%22baidu_seo_keyword_id%22%2C%22%24search_keyword_id_hash%22%3A4558975629240447%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTkxYjE5OGM3YjJkNTItMGZjY2E4ZDczMWNiOWI4LTE4NTI1NjM3LTIwNzM2MDAtMTkxYjE5OGM3YjMxZmQ5IiwiJGlkZW50aXR5X2xvZ2luX2lkIjoiMzEzMzAzODgxOCJ9%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%24identity_login_id%22%2C%22value%22%3A%223133038818%22%7D%2C%22%24device_id%22%3A%22191b198c7b2d52-0fcca8d731cb9b8-18525637-2073600-191b198c7b31fd9%22%7D;_qimei_i_3=4dfa758b920b50d2c9c5fe365ad77ae6f6bda2a2135d578ab5dc280d219a713a676061973989e285d096;_ga=GA1.2.981511920.1725261466;qcloud_from=qcloud.baidu.seo-1752799836749;web_uid=ac283ec7-4bf6-40c9-a0ce-5a2e0cd7db06;_gcl_au=1.1.295860038.1750044382;_qimei_i_1=53c92ed79c5c53d997c6a830538677e2f7bdf0f51209518bb38e2f582593206c616336913980e4ddd6f3eec5;hy_source=web;_qimei_fingerprint=efbb885a22f7d4e5589008c28bc8e7ba;_qimei_h38=e9632faf082420cd40bb971703000001419610;_qimei_uuid42=18c0310102d1002a082420cd40bb9717523c3c7e12;hy_token=8tE8bq6InCxff5mUqQZfc9aGHP6NPD80Cr/k258SiLJ0SRKVmpnUylkLLyDfCVTF9ODHgBEHyOLIcel29d1mX0kymysiIaZYSDr6Xzq5lVMpsKUcFcSvEUaC7i7OWJLxaG2UHBpv1r6rzNJ1AK/vdJB/JgR+VfBuyHcBAhZvFjI+SK5/XXKJHVlQUSk0sDcCKUoLec4xWHnRXFsGT+xcy8LTSuM0rD2AtdD1SIHpuk4H5mCnFHzFJZki+Zm2BLnGRhOqCEjD1GTT1fh8a5H2aGRG1wLSdZEkxtUN2JfwC9005MvGjklEVpb+Vjuhkj8yxQveWM38lQ6s+4eZ5RXM4RBvjWe/IcVXqbSEhkLFKaHED/pVIxDXgjRWJhcRXo36w5VWzc7XO6/qJRouVj6/VpHFNYBtaIR25SC3itS138QdEo5EDEHQtGap/R0jxaiPKSqnDQ70Uzwd4ORrdBE31eCQqK1oyiG5KmPFj4azXTJKn+VejoUJxBBXqMPMtsv+b9e8Plh2dpW3vNepa9nMOQ9gJVynX6KJdgMKP4Ea5W1UOuUW8P/MHR787PpxwyRRz6D7ZDs2RhSvNvmCJzG6Cw==;hy_user=1bc4978e537649caae881f86ba807bca"""
+    # arun(check_token(token))
@@ -21,6 +21,8 @@ from contextlib import asynccontextmanager
 from meutils.pipe import *
 from meutils.llm.clients import AsyncOpenAI
 from meutils.apis.utils import make_request
+from meutils.apis.oneapi.user import get_user_money
+from fastapi import status, HTTPException
 
 base_url = "https://api.chatfire.cn/flux/v1"
 
@@ -200,15 +202,27 @@ async def billing_flow_for_async_task(
         n: float = 1,
         api_key: Optional[str] = None
 ):
-    if n:  # 计费
-        await billing_for_async_task(model, task_id=task_id, n=n, api_key=api_key)
+    if n and (user_money := await get_user_money(api_key)):  # 计费
+
+        # 判断
+        if user_money < 1:
+            raise HTTPException(status_code=status.HTTP_402_PAYMENT_REQUIRED, detail="余额不足或API-KEY限额")
+
+        # 执行
         yield
 
+        # 计费
+        await billing_for_async_task(model, task_id=task_id, n=n, api_key=api_key)
+
     else:  # 不计费
         a = yield
         logger.debug(a)
 
 
+# # 检查余额
+# if user_money := await get_user_money(api_key):
+#     if user_money < 1:
+#         raise HTTPException(status_code=status.HTTP_402_PAYMENT_REQUIRED, detail="余额不足或API-KEY限额")
 @asynccontextmanager
 async def billing_flow_for_tokens(
         model: str = "usage-chat",
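billing_flow_for_async_task now gates billing on the caller's balance: get_user_money(api_key) is checked first, a balance below 1 raises HTTP 402, and billing_for_async_task is only invoked after the wrapped block has run (previously it was charged up front). Note that a falsy return from get_user_money drops the request into the non-billing branch. A hedged usage sketch, assuming the generator is wrapped with @asynccontextmanager like the neighbouring billing_flow_for_tokens and that model/task_id are its leading parameters (inferred from the billing_for_async_task call); submit_task is illustrative, not part of the package:

    async def submit_task(model: str, task_id: str, api_key: str):
        # raises HTTPException(402 PAYMENT_REQUIRED) before any work if the balance is < 1
        async with billing_flow_for_async_task(model, task_id=task_id, n=1, api_key=api_key):
            ...  # create the async task here; billing happens only after this block exits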
@@ -20,7 +20,7 @@ MJ_RELAX = 1
 MJ_FAST = 1.5
 
 STEP = 2
-MINIMAX_VIDEO = 3
+MINIMAX_VIDEO = 2 * 0.6
 
 FAL = 3
 FAL_ = 5
@@ -81,6 +81,7 @@ FAL_MODELS = {
 
     # veo
     "fal-ai/veo3/fast": 3.2 * FAL_,  # 2 3.2
+    "fal-ai/veo3/fast/image-to-video": 3.2 * FAL_,
 
     # seedance
     "fal-ai/bytedance/seedance/v1/lite/text-to-video": 1,
@@ -141,7 +142,7 @@ MODEL_PRICE = {
     "gemini-2.5-pro-video": 0.1,
 
     # rix
-    "kling_image": 0.05,
+    "kling_image": 0.025,
     "kling_image_expand": 0.3,
     "kling_virtual_try_on": 1,
     "kling_effects": 1,
@@ -167,6 +168,7 @@ MODEL_PRICE = {
     # 火山
     "jimeng-video-3.0": 0.5,
     "doubao-seedream-3-0-t2i-250415": 0.1,
+    "doubao-seededit-3-0-i2i-250628": 0.1,
 
     "doubao-seedance-1-0-pro-250528": 0.5,  # 480 1080
     "api-doubao-seedance-1-0-pro-250528-1080p": 2,  # 480 1080
@@ -184,13 +186,25 @@ MODEL_PRICE = {
     "veo3-pro": 8,
     "veo3-pro-frames": 8,
 
-    # hailuo
+    # hailuo https://www.minimax.io/price
     "minimax-hailuo-02": 1,
     "api-minimax-hailuo-01-6s": 0.5,
     "api-minimax-hailuo-02-6s-768p": 1,
     "api-minimax-hailuo-02-10s-768p": 2,
     "api-minimax-hailuo-02-6s-1080p": 2,
 
+    "minimax-t2v-01_6s_720p": 1 * MINIMAX_VIDEO,
+    "minimax-t2v-01-director_6s_720p": 1 * MINIMAX_VIDEO,
+    "minimax-i2v-01_6s_720p": 1 * MINIMAX_VIDEO,
+    "minimax-i2v-01-director_6s_720p": 1 * MINIMAX_VIDEO,
+    "minimax-i2v-01-live_6s_720p": 1 * MINIMAX_VIDEO,
+    "minimax-s2v-01_6s_720p": 1.5 * MINIMAX_VIDEO,
+    "minimax-hailuo-02_6s_512p": 0.3 * MINIMAX_VIDEO,
+    "minimax-hailuo-02_10s_512p": 0.5 * MINIMAX_VIDEO,
+    "minimax-hailuo-02_6s_768p": 1 * MINIMAX_VIDEO,
+    "minimax-hailuo-02_10s_768p": 2 * MINIMAX_VIDEO,
+    "minimax-hailuo-02_6s_1080p": 2 * MINIMAX_VIDEO,
+
     # chatfire
     "ppu-01": 0.01,
     "ppu-1": 0.1,
@@ -410,6 +424,7 @@ MODEL_PRICE = {
     "gpt-4o-image": 0.05,
     "sora-image": 0.1,
     "gpt-image-1": 0.1,
+    "gpt-5-thinking": 0.05,
 
     "gpt-4-gizmo-*": 0.1,
     "advanced-voice": 1,
@@ -653,6 +668,7 @@ MODEL_RATIO = {
     "glm-4.5-airx": 1,
     "glm-4.5": 0.5,
     "glm-4.5-x": 2,
+    "glm-4.5v": 1,
 
     # 阿里千问 https://dashscope.console.aliyun.com/billing
     "qwen-long": 0.25,
@@ -965,6 +981,7 @@ MODEL_RATIO = {
     "claude-sonnet-4-20250514-thinking": 1.5,
     "claude-opus-4-20250514": 7.5,
     "claude-opus-4-20250514-thinking": 7.5,
+    "claude-opus-4.1": 7.5,
 
     "deepclaude": 1.5,
     "deep-claude": 1.5,
@@ -1059,8 +1076,6 @@ MODEL_RATIO = {
     "gpt-4-1106-vision-preview": 5,
     "gpt-4-32k": 30,
     "gpt-4-32k-0613": 30,
-    "gpt-4-all": 15,
-    "gpt-4-gizmo-*": 15,
     "gpt-4-turbo": 5,
     "gpt-4-turbo-2024-04-09": 5,
     "gpt-4-turbo-preview": 5,
@@ -1094,6 +1109,14 @@ MODEL_RATIO = {
     "gpt-oss-20b": 0.025,
     "gpt-oss-120b": 0.05,
 
+    "gpt-5": 0.625,
+    "gpt-5-2025-08-07": 0.625,
+    "gpt-5-chat-latest": 0.625,
+    "gpt-5-mini": 0.125,
+    "gpt-5-mini-2025-08-07": 0.125,
+    "gpt-5-nano": 0.025,
+    "gpt-5-nano-2025-08-07": 0.025,
+
     "o1": 7.5,
     "o1-2024-12-17": 7.5,
 
@@ -1217,9 +1240,6 @@ COMPLETION_RATIO = {
     "grok-3-mini-fast-beta": 4 / 0.6,
     "grok-4": 5,
 
-    "gpt-4-all": 4,
-    "gpt-4-gizmo-*": 4,
-    "gpt-4o-all": 4,
     "gpt-4.5-preview-2025-02-27": 2,
 
     "o1-mini": 4,
@@ -1254,6 +1274,14 @@ COMPLETION_RATIO = {
     "gpt-oss-20b": 4,
     "gpt-oss-120b": 4,
 
+    "gpt-5": 8,
+    "gpt-5-2025-08-07": 8,
+    "gpt-5-chat-latest": 8,
+    "gpt-5-mini": 8,
+    "gpt-5-mini-2025-08-07": 8,
+    "gpt-5-nano": 4,
+    "gpt-5-nano-2025-08-07": 4,
+
     # claude
     "claude-3-5-haiku-20241022": 5,
     "anthropic/claude-3-5-haiku-20241022:beta": 5,
@@ -1273,6 +1301,7 @@ COMPLETION_RATIO = {
     "claude-sonnet-4-20250514-thinking": 5,
     "claude-opus-4-20250514": 5,
     "claude-opus-4-20250514-thinking": 5,
+    "claude-opus-4.1": 5,
 
     "llama-3.1-70b-instruct": 2,
     "meta-llama/Meta-Llama-3.1-70B-Instruct": 2,
@@ -1554,6 +1583,7 @@ COMPLETION_RATIO = {
     "glm-4.5-airx": 3,
     "glm-4.5": 4,
     "glm-4.5-x": 4,
+    "glm-4.5v": 3,
 
     "step-1-flash": 5,
     "step-1-8k": 5,
@@ -1778,3 +1808,6 @@ if __name__ == '__main__':
 
     print("FAL按次")
     print(','.join(FAL_MODELS))  # fal 按次
+
+    print('\n\n')
+    print([k for k in MODEL_PRICE if k.startswith(('minimax-'))] | xjoin(","))