MeUtils 2025.8.7.20.31.52__py3-none-any.whl → 2025.8.11.16.30.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,6 +21,8 @@ from contextlib import asynccontextmanager
 from meutils.pipe import *
 from meutils.llm.clients import AsyncOpenAI
 from meutils.apis.utils import make_request
+from meutils.apis.oneapi.user import get_user_money
+from fastapi import status, HTTPException
 
 base_url = "https://api.chatfire.cn/flux/v1"
 
@@ -200,15 +202,27 @@ async def billing_flow_for_async_task(
         n: float = 1,
         api_key: Optional[str] = None
 ):
-    if n:  # billing
-        await billing_for_async_task(model, task_id=task_id, n=n, api_key=api_key)
+    if n and (user_money := await get_user_money(api_key)):  # billing
+
+        # check
+        if user_money < 1:
+            raise HTTPException(status_code=status.HTTP_402_PAYMENT_REQUIRED, detail="余额不足或API-KEY限额")
+
+        # execute
         yield
 
+        # billing
+        await billing_for_async_task(model, task_id=task_id, n=n, api_key=api_key)
+
     else:  # no billing
         a = yield
         logger.debug(a)
 
 
+# # check balance
+# if user_money := await get_user_money(api_key):
+#     if user_money < 1:
+#         raise HTTPException(status_code=status.HTTP_402_PAYMENT_REQUIRED, detail="余额不足或API-KEY限额")
 @asynccontextmanager
 async def billing_flow_for_tokens(
         model: str = "usage-chat",
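
For context, the changed context manager would be consumed roughly as in the sketch below (a hypothetical call site; the function name, model, and task id are illustrative, not from the package). The behavioural points of the change: the HTTP 402 check runs before the task body, billing now happens only after the body finishes without raising, and a falsy balance (`0` or `None`) from `get_user_money` falls through to the unbilled `else` branch.

```python
# Hypothetical call site; assumes billing_flow_for_async_task from the hunk
# above is importable in scope. Names and arguments are placeholders.
async def submit_flux_task(api_key: str) -> None:
    async with billing_flow_for_async_task("flux-pro", task_id="task-123", n=1, api_key=api_key):
        # Submit the upstream job here. The balance check (HTTP 402) has
        # already run before this block; billing_for_async_task runs only if
        # this block returns without raising, so failed submissions are not charged.
        ...
```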
@@ -81,6 +81,7 @@ FAL_MODELS = {
 
     # veo
     "fal-ai/veo3/fast": 3.2 * FAL_,  # 2 3.2
+    "fal-ai/veo3/fast/image-to-video": 3.2 * FAL_,
 
     # seedance
     "fal-ai/bytedance/seedance/v1/lite/text-to-video": 1,
@@ -141,7 +142,7 @@ MODEL_PRICE = {
     "gemini-2.5-pro-video": 0.1,
 
     # rix
-    "kling_image": 0.05,
+    "kling_image": 0.025,
     "kling_image_expand": 0.3,
     "kling_virtual_try_on": 1,
     "kling_effects": 1,
@@ -167,6 +168,7 @@ MODEL_PRICE = {
     # Volcano Engine (火山)
     "jimeng-video-3.0": 0.5,
     "doubao-seedream-3-0-t2i-250415": 0.1,
+    "doubao-seededit-3-0-i2i-250628": 0.1,
 
     "doubao-seedance-1-0-pro-250528": 0.5,  # 480 1080
     "api-doubao-seedance-1-0-pro-250528-1080p": 2,  # 480 1080
@@ -410,6 +412,7 @@ MODEL_PRICE = {
     "gpt-4o-image": 0.05,
     "sora-image": 0.1,
     "gpt-image-1": 0.1,
+    "gpt-5-thinking": 0.05,
 
     "gpt-4-gizmo-*": 0.1,
     "advanced-voice": 1,
@@ -965,6 +968,7 @@ MODEL_RATIO = {
     "claude-sonnet-4-20250514-thinking": 1.5,
     "claude-opus-4-20250514": 7.5,
     "claude-opus-4-20250514-thinking": 7.5,
+    "claude-opus-4.1": 7.5,
 
     "deepclaude": 1.5,
     "deep-claude": 1.5,
@@ -1059,8 +1063,6 @@ MODEL_RATIO = {
     "gpt-4-1106-vision-preview": 5,
     "gpt-4-32k": 30,
     "gpt-4-32k-0613": 30,
-    "gpt-4-all": 15,
-    "gpt-4-gizmo-*": 15,
     "gpt-4-turbo": 5,
     "gpt-4-turbo-2024-04-09": 5,
     "gpt-4-turbo-preview": 5,
@@ -1094,6 +1096,14 @@ MODEL_RATIO = {
     "gpt-oss-20b": 0.025,
     "gpt-oss-120b": 0.05,
 
+    "gpt-5": 0.625,
+    "gpt-5-2025-08-07": 0.625,
+    "gpt-5-chat-latest": 0.625,
+    "gpt-5-mini": 0.125,
+    "gpt-5-mini-2025-08-07": 0.125,
+    "gpt-5-nano": 0.025,
+    "gpt-5-nano-2025-08-07": 0.025,
+
     "o1": 7.5,
     "o1-2024-12-17": 7.5,
 
@@ -1217,9 +1227,6 @@ COMPLETION_RATIO = {
     "grok-3-mini-fast-beta": 4 / 0.6,
     "grok-4": 5,
 
-    "gpt-4-all": 4,
-    "gpt-4-gizmo-*": 4,
-    "gpt-4o-all": 4,
     "gpt-4.5-preview-2025-02-27": 2,
 
     "o1-mini": 4,
@@ -1254,6 +1261,14 @@ COMPLETION_RATIO = {
     "gpt-oss-20b": 4,
     "gpt-oss-120b": 4,
 
+    "gpt-5": 8,
+    "gpt-5-2025-08-07": 8,
+    "gpt-5-chat-latest": 8,
+    "gpt-5-mini": 8,
+    "gpt-5-mini-2025-08-07": 8,
+    "gpt-5-nano": 4,
+    "gpt-5-nano-2025-08-07": 4,
+
     # claude
     "claude-3-5-haiku-20241022": 5,
     "anthropic/claude-3-5-haiku-20241022:beta": 5,
@@ -1273,6 +1288,7 @@ COMPLETION_RATIO = {
     "claude-sonnet-4-20250514-thinking": 5,
     "claude-opus-4-20250514": 5,
     "claude-opus-4-20250514-thinking": 5,
+    "claude-opus-4.1": 5,
 
     "llama-3.1-70b-instruct": 2,
     "meta-llama/Meta-Llama-3.1-70B-Instruct": 2,
@@ -1778,3 +1794,6 @@ if __name__ == '__main__':
 
     print("FAL按次")
     print(','.join(FAL_MODELS))  # fal, per-call
+
+    print('\n\n')
+    print([k for k in MODEL_RATIO if k.startswith(('gpt-5'))] | xjoin(","))
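
The new `__main__` lines use `xjoin` from `meutils.pipe`; assuming it acts as a pipeable comma-join (an inference, not documented in the diff), the plain-Python equivalent and its output for just the keys added in this release would be:

```python
# Plain-Python equivalent of the new xjoin check (assumes `| xjoin(",")`
# behaves like str.join over the piped iterable).
print(",".join(k for k in MODEL_RATIO if k.startswith("gpt-5")))
# With only the keys added in this release, that prints at least:
# gpt-5,gpt-5-2025-08-07,gpt-5-chat-latest,gpt-5-mini,gpt-5-mini-2025-08-07,gpt-5-nano,gpt-5-nano-2025-08-07
```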