MeUtils 2025.8.29.20.5.48__py3-none-any.whl → 2025.9.5.20.57.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29):
  1. examples/_openaisdk/openai_router.py +14 -4
  2. examples/_openaisdk/openai_volc_doubao.py +139 -0
  3. meutils/apis/chatglm/glm_video_api.py +1 -1
  4. meutils/apis/google/chat.py +6 -2
  5. meutils/apis/google/images.py +25 -19
  6. meutils/apis/images/generations.py +7 -6
  7. meutils/{db/id2redis.py → apis/meituan/__init__.py} +2 -3
  8. meutils/apis/meituan/chat.py +109 -0
  9. meutils/apis/minimax/videos.py +11 -7
  10. meutils/apis/oneapi/tasks.py +3 -1
  11. meutils/apis/ppio/videos.py +1 -1
  12. meutils/clis/server.py +1 -3
  13. meutils/data/VERSION +1 -1
  14. meutils/io/_openai_files.py +1 -0
  15. meutils/io/files_utils.py +17 -2
  16. meutils/llm/check_utils.py +35 -4
  17. meutils/llm/openai_polling/chat.py +66 -15
  18. meutils/llm/openai_utils/adapters.py +45 -30
  19. meutils/schemas/oneapi/common.py +7 -0
  20. meutils/serving/fastapi/gunicorn.conf.py +4 -4
  21. meutils/str_utils/__init__.py +1 -1
  22. meutils/str_utils/regular_expression.py +17 -2
  23. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/METADATA +262 -262
  24. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/RECORD +28 -27
  25. examples/_openaisdk/openai_doubao.py +0 -67
  26. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/WHEEL +0 -0
  27. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/entry_points.txt +0 -0
  28. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/licenses/LICENSE +0 -0
  29. {meutils-2025.8.29.20.5.48.dist-info → meutils-2025.9.5.20.57.37.dist-info}/top_level.txt +0 -0
@@ -7,6 +7,7 @@
7
7
  # @WeChat : meutils
8
8
  # @Software : PyCharm
9
9
  # @Description :
10
+ import os
10
11
 
11
12
  from meutils.pipe import *
12
13
  from meutils.caches import rcache
@@ -227,7 +228,7 @@ async def check_token_for_volc(api_key, threshold: float = 1, purpose: Optional[
227
228
 
228
229
  payload = {
229
230
  # "model": "doubao-seedance-1-0-pro-250528",
230
- "model":"doubao-seedance-1-0-lite-i2v-250428",
231
+ "model": "doubao-seedance-1-0-lite-i2v-250428",
231
232
  "content": [
232
233
  {
233
234
  "type": "text",
@@ -358,6 +359,32 @@ async def check_token_for_gitee(api_key, threshold: float = 1):
358
359
  return False
359
360
 
360
361
 
362
+ @retrying()
363
+ # @rcache(ttl=1 * 24 * 3600, skip_cache_func=skip_cache_func)
364
+ async def check_token_for_openrouter(api_key, threshold: float = 1):
365
+ if not isinstance(api_key, str):
366
+ return await check_tokens(api_key, check_token_for_openrouter)
367
+
368
+ try:
369
+ # {"data": {"total_credits": 215.00205323, "total_usage": 215.20147321775545}} %
370
+ client = AsyncOpenAI(base_url=os.getenv("OPENROUTER_BASE_URL"), api_key=api_key)
371
+ data = await client.get("/credits", cast_to=object)
372
+ # logger.debug(data)
373
+ # data = await client.get("/models/user", cast_to=object)
374
+ # logger.debug(bjson(data))
375
+
376
+ #
377
+ # logger.debug(bjson(data))
378
+
379
+ return True
380
+ except TimeoutException as e:
381
+ raise
382
+
383
+ except Exception as e:
384
+ logger.error(f"Error: {e}\n{api_key}")
385
+ return False
386
+
387
+
361
388
  async def get_valid_token_for_fal(feishu_url: Optional[str] = None):
362
389
  feishu_url = feishu_url or "https://xchatllm.feishu.cn/sheets/Z59Js10DbhT8wdt72LachSDlnlf?sheet=iFRwmM"
363
390
  _ = await get_next_token(feishu_url, check_token_for_fal, ttl=600)
@@ -400,9 +427,9 @@ if __name__ == '__main__':
400
427
 
401
428
  # arun(check_token_for_ppinfra("sk_F0kgPyCMTzmOH_-VCEJucOK8HIrbnLGYm_IWxBToHZQ"))
402
429
 
403
- # arun(check_token_for_volc("c720b2fb-e2be-42a7-a9e5-0a42c29d5766"))
430
+ # arun(check_token_for_volc("827b41d2-7e8c-46e2-9854-0720ca1bd2e4"))
404
431
  # arun(check_token_for_volc("279749bd-ba5e-4962-9c65-eb6604b65594"))
405
- arun(check_token_for_volc("8a5af7cb-42a3-4391-ac40-9d0f4502acde", purpose='seedance'))
432
+ # arun(check_token_for_volc("8a5af7cb-42a3-4391-ac40-9d0f4502acde", purpose='seedance'))
406
433
 
407
434
  # arun(check_token_for_ppinfra("sk_mCb5sRGTi6GXkSRp5F679Rbs0V_Hfee3p85lccGXCOo"))
408
435
 
@@ -410,8 +437,12 @@ if __name__ == '__main__':
410
437
 
411
438
  # arun(check_token_for_fal("56d8a95e-2fe6-44a6-8f7d-f7f9c83eec24:537f06b6044770071f5d86fc7fcd6d6f"))
412
439
 
413
- # arun(check_token_for_ppinfra("sk_ib2EjSVnXfB5hSlVuckpejRNXLIU3MaD1wxvXnsvdxQ", threshold=18000))
440
+ # arun(check_token_for_ppinfra("sk_4Ja29OIUBVwKo5GWx-PRTsRcTyxxRjZDpYxSdPg75QU", threshold=18000))
414
441
 
415
442
  # arun(check_token_for_gitee("NWVXUPI38OQVXZGOEL3D23I9YUQWZPV23GVVBW1X"))
416
443
 
417
444
  # arun(get_valid_token_for_fal())
445
+
446
+ api_key = "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"
447
+ # api_key = None
448
+ arun(check_token_for_openrouter(api_key=api_key or os.getenv("OPENROUTER_API_KEY")))
@@ -34,18 +34,61 @@ class Completions(object):
34
34
  self.client = AsyncOpenAI(base_url=self.base_url, api_key=self.api_key, http_client=http_client)
35
35
 
36
36
  async def create(self, request: CompletionRequest):
37
+ # todo:
38
+ # 1. 可思考模型 思考优先
39
+ # request.thinking = {"type": "enabled"}
40
+
41
+ if request.thinking and request.model.startswith("doubao-seed") and request.thinking.get("type") == "disabled":
42
+ choices = {
43
+ "doubao-seed-1.6-250615",
44
+ "doubao-seed-1-6-250615",
45
+ "doubao-seed-1-6-vision-250815"
46
+ } - {request.model}
47
+ request.model = np.random.choice(list(choices))
48
+ elif request.model.startswith("doubao-seed"):
49
+ request.thinking = {"type": "enabled"}
50
+ choices = {
51
+ "doubao-seed-1.6-250615",
52
+ "doubao-seed-1-6-250615",
53
+ "doubao-seed-1-6-vision-250815",
54
+ "doubao-seed-1-6-thinking-250615",
55
+ "doubao-seed-1-6-thinking-250715"
56
+ } - {request.model}
57
+ request.model = np.random.choice(list(choices))
58
+
59
+
60
+ elif request.model.startswith(("doubao-1-5-thinking",)): # thinking
61
+ request.thinking = None
62
+
63
+ choices = {
64
+ "doubao-1-5-thinking-pro-250415",
65
+ "doubao-1-5-thinking-pro-m-250428",
66
+ "doubao-1-5-thinking-vision-pro-250428",
67
+ "doubao-1-5-ui-tars-250428",
68
+ } - {request.model}
69
+ request.model = np.random.choice(list(choices))
70
+
71
+ elif request.model.startswith(("doubao-1-5", "doubao-1.5")): # nothinking
72
+ request.thinking = {"type": "disabled"}
73
+
74
+ choices = {
75
+ "doubao-1-5-pro-32k-250115",
76
+ "doubao-1-5-pro-256k-250115",
77
+ "doubao-1-5-pro-32k-character-250715",
78
+ "doubao-1-5-pro-32k-character-250228",
79
+ "doubao-1.5-vision-pro-250328",
80
+ "doubao-1-5-vision-pro-32k-250115",
81
+ "doubao-1-5-thinking-pro-250415",
82
+ "doubao-1-5-thinking-pro-m-250428",
83
+ "doubao-1-5-thinking-vision-pro-250428",
84
+ "doubao-1-5-ui-tars-250428",
85
+ } - {request.model}
86
+ request.model = np.random.choice(list(choices))
87
+ # todo vision
37
88
 
38
- if request.model.startswith("doubao-seed"): # todo: 1-5
39
- choices = """
40
- doubao-seed-1-6-250615
41
- doubao-seed-1-6-thinking-250615
42
- doubao-seed-1-6-thinking-250715
43
- doubao-seed-1-6-vision-250815
44
- """.split()
45
- request.model = np.random.choice(choices)
46
89
  ###########################################################################
47
90
  # 开启视觉模型
48
- if not any(i in request.model.lower() for i in ["v-", "vl", 'vision', 'doubao-seed']) and (
91
+ if not any(i in request.model.lower() for i in ["v-", "vl", "ui", 'vision', 'doubao-seed']) and (
49
92
  urls := request.last_urls.get("image_url")):
50
93
  # logger.debug(request)
51
94
  if request.model.startswith(("gemini",)):
@@ -125,8 +168,12 @@ if __name__ == '__main__':
125
168
  # model="gemini-2.0-flash",
126
169
  # model="glm-4-flash",
127
170
  # model="deepseek-ai/DeepSeek-V3",
128
- model="Qwen/Qwen3-8B1",
129
-
171
+ # model="Qwen/Qwen3-8B1",
172
+ # model="Qwen/Qwen3-8B1",
173
+ # model="doubao-1-5-thinking",
174
+ # model="doubao-1-5",
175
+ model="doubao-seed",
176
+ thinking={"type": "disabled"},
130
177
  messages=[
131
178
  {
132
179
  "role": "system",
@@ -141,7 +188,7 @@ if __name__ == '__main__':
141
188
  {"role": "user", "content": [
142
189
  {
143
190
  "type": "text",
144
- "text": "解释下" * 30000
191
+ "text": "解释下" * 1
145
192
  },
146
193
  # {
147
194
  # "image_url": {
@@ -153,7 +200,7 @@ if __name__ == '__main__':
153
200
  ]}
154
201
  ],
155
202
  stream=False,
156
- max_tokens=None,
203
+ max_tokens=10,
157
204
  )
158
205
  # arun(Completions().create(request))
159
206
  # d = {
@@ -222,6 +269,10 @@ if __name__ == '__main__':
222
269
  # stream=False,
223
270
  # max_tokens=None,
224
271
  # )
225
- api_key = os.getenv("SILICONFLOW_API_KEY")
226
- base_url = "https://api.siliconflow.cn"
272
+ # api_key = os.getenv("SILICONFLOW_API_KEY")
273
+ # base_url = "https://api.siliconflow.cn"
274
+ # arun(Completions(api_key=api_key, base_url=base_url).create(request))
275
+
276
+ api_key = os.getenv("VOLC_API_KEY")
277
+ base_url = os.getenv("VOLC_BASE_URL")
227
278
  arun(Completions(api_key=api_key, base_url=base_url).create(request))
@@ -33,26 +33,6 @@ async def chat_for_image(
33
33
  ):
34
34
  generate = generate and partial(generate, api_key=api_key, base_url=base_url)
35
35
 
36
- if not request.stream or request.last_user_content.startswith( # 跳过nextchat
37
- (
38
- "hi",
39
- "使用四到五个字直接返回这句话的简要主题",
40
- "简要总结一下对话内容,用作后续的上下文提示 prompt,控制在 200 字以内"
41
- )):
42
- chat_completion.choices[0].message.content = "请设置`stream=True`"
43
- return chat_completion
44
-
45
- # request.stream = True # 流转非流
46
- # response = await chat_for_image(generate, request, api_key)
47
- # chunks = await stream.list(response)
48
- #
49
- # logger.debug(chunks)
50
- #
51
- # if chunks and isinstance(chunks[0], ChatCompletion):
52
- # response = chunks[0]
53
- # else:
54
- # response = create_chat_completion(chunks)
55
- # return response
56
36
  image = None
57
37
  prompt = request.last_user_content
58
38
  if image_urls := request.last_urls.get("image_url"): # image_url
@@ -63,32 +43,55 @@ async def chat_for_image(
63
43
  urls = await to_url(image_urls, content_type="image/png") # 数组
64
44
  image = urls
65
45
 
66
- request = ImageRequest(
46
+ image_request = ImageRequest(
67
47
  model=request.model,
68
48
  prompt=prompt,
69
49
  image=image
70
50
  )
71
- if not request.image:
72
- request.image, request.prompt = request.image_and_prompt
51
+ if not image_request.image:
52
+ image_request.image, image_request.prompt = image_request.image_and_prompt
73
53
 
74
- if '--' in request.prompt:
75
- prompt_dict = parse_command_string(request.prompt)
54
+ if '--' in image_request.prompt:
55
+ prompt_dict = parse_command_string(image_request.prompt)
76
56
  # 缩写补充
77
57
  prompt_dict['aspect_ratio'] = prompt_dict.get('aspect_ratio') or prompt_dict.get('ar')
78
58
 
79
59
  data = {
80
- **request.model_dump(exclude_none=True, exclude={"extra_fields", "aspect_ratio"}),
60
+ **image_request.model_dump(exclude_none=True, exclude={"extra_fields", "aspect_ratio"}),
81
61
  **prompt_dict
82
62
  }
83
- request = ImageRequest(**data)
84
- logger.debug(request)
63
+ image_request = ImageRequest(**data)
64
+ logger.debug(image_request)
85
65
 
66
+ # 非流式
67
+ if not request.stream or request.last_user_content.startswith( # 跳过nextchat
68
+ (
69
+ "hi",
70
+ "使用四到五个字直接返回这句话的简要主题",
71
+ "简要总结一下对话内容,用作后续的上下文提示 prompt,控制在 200 字以内"
72
+ )):
73
+ # chat_completion.choices[0].message.content = "请设置`stream=True`"
74
+ # return chat_completion
75
+
76
+ response = await generate(image_request)
77
+
78
+ if not isinstance(response, dict):
79
+ response = response.model_dump()
80
+
81
+ content = ""
82
+ for image in response['data']:
83
+ content += f"""![{image.get("revised_prompt")}]({image['url']})\n\n"""
84
+
85
+ chat_completion.choices[0].message.content = content
86
+ return chat_completion
87
+
88
+ # 流式
86
89
  if not generate: return
87
90
 
88
- future_task = asyncio.create_task(generate(request)) # 异步执行
91
+ future_task = asyncio.create_task(generate(image_request)) # 异步执行
89
92
 
90
93
  async def gen():
91
- text = request.model_dump_json(exclude_none=True).replace("free", "")
94
+ text = image_request.model_dump_json(exclude_none=True).replace("free", "")
92
95
  for i in f"""> 🖌️正在绘画\n\n```json\n{text}\n```\n\n""":
93
96
  await asyncio.sleep(0.05)
94
97
  yield i
@@ -151,6 +154,7 @@ async def chat_for_video(
151
154
 
152
155
 
153
156
  if __name__ == '__main__':
157
+ from meutils.apis.images.generations import generate
154
158
  request = CompletionRequest(
155
159
  model="deepseek-r1-Distill-Qwen-1.5B",
156
160
  messages=[
@@ -159,3 +163,14 @@ if __name__ == '__main__':
159
163
  stream=True,
160
164
  )
161
165
  arun(chat_for_image(None, request))
166
+
167
+ request = CompletionRequest(
168
+ model="gemini-2.5-flash-image-preview",
169
+ messages=[
170
+ {"role": "user", "content": "画条狗"}
171
+ ],
172
+ # stream=True,
173
+ )
174
+ api_key = "sk-MAZ6SELJVtGNX6jgIcZBKuttsRibaDlAskFAnR7WD6PBSN6M-openai"
175
+ base_url = "https://new.yunai.link/v1"
176
+ arun(chat_for_image(generate, request, api_key, base_url))
@@ -101,6 +101,11 @@ FAL_MODELS = {
101
101
  "fal-ai/wan/v2.2-5b/text-to-video": 0.15 * FAL_,
102
102
  "fal-ai/wan/v2.2-5b/image-to-video": 0.15 * FAL_,
103
103
 
104
+ # pika
105
+ "fal-ai/pika/v2.2/text-to-video": 0.45 * FAL,
106
+ "fal-ai/pika/v2.2/image-to-video": 0.45 * FAL,
107
+ "fal-ai/pika/v2.2/pikascenes": 0.45 * FAL,
108
+
104
109
  }
105
110
 
106
111
  FAL_MODELS = {
@@ -661,6 +666,7 @@ MODEL_RATIO = {
661
666
  "moonshotai/kimi-k2-instruct": 2,
662
667
  "kimi-k2-0711-preview": 2,
663
668
  "kimi-k2-turbo-preview": 2,
669
+ "kimi-k2-250711": 2,
664
670
 
665
671
  # 智谱 https://www.bigmodel.cn/pricing
666
672
  'glm-4-9b-chat': 0.1,
@@ -1251,6 +1257,7 @@ COMPLETION_RATIO = {
1251
1257
  "moonshotai/kimi-k2-instruct": 4,
1252
1258
  "kimi-k2-0711-preview": 4,
1253
1259
  "kimi-k2-turbo-preview": 4,
1260
+ "kimi-k2-250711": 4,
1254
1261
 
1255
1262
  "moonshot-v1-8k": 5,
1256
1263
  "moonshot-v1-32k": 4,
@@ -78,10 +78,10 @@ backlog = 1024
78
78
 
79
79
  workers = int(os.getenv("FASTAPI_WORKERS", 2))
80
80
  worker_class = "uvicorn.workers.UvicornWorker"
81
- worker_connections = 1000
82
- timeout = 300
83
- graceful_timeout = 100 # 在接收到重新启动信号后,worker有这么多时间来完成服务请求。超时后(从接收到重启信号开始)仍然活着的worder被强制杀死。
84
- keepalive = 5
81
+ worker_connections = 1024
82
+ timeout = 360
83
+ graceful_timeout = 128 # 在接收到重新启动信号后,worker有这么多时间来完成服务请求。超时后(从接收到重启信号开始)仍然活着的worder被强制杀死。
84
+ keepalive = 16
85
85
  reload = False
86
86
 
87
87
  #
@@ -10,7 +10,7 @@
10
10
 
11
11
 
12
12
  from meutils.pipe import *
13
- from meutils.str_utils.regular_expression import parse_url, parse_command_string
13
+ from meutils.str_utils.regular_expression import parse_url, parse_command_string, parse_base64
14
14
  from meutils.request_utils.crawler import Crawler
15
15
  from urllib.parse import urlencode, parse_qs, parse_qsl, quote_plus, unquote_plus, urljoin
16
16
 
@@ -114,6 +114,7 @@ def parse_url(text: str, for_image=False, fn: Optional[Callable] = None):
114
114
  def parse_url_from_json():
115
115
  pass
116
116
 
117
+
117
118
  def parse_command_string(command_str: str) -> dict:
118
119
  """
119
120
  解析一个类似 "prompt --key1 value1 --key2 value2" 格式的字符串。
@@ -188,6 +189,16 @@ def parse_command_string(command_str: str) -> dict:
188
189
  return result
189
190
 
190
191
 
192
+ def parse_base64(text, pattern=r'!\[.*?\]\((.*?)\)'):
193
+ """
194
+ :param text:
195
+ :param pattern:
196
+ pattern=r'!\[.*?\]\((data:image/.*?)\)'
197
+ :return:
198
+ """
199
+ base64_strings = re.findall(pattern, text)
200
+ return base64_strings
201
+
191
202
 
192
203
  if __name__ == '__main__':
193
204
  # from urllib.parse import urlparse
@@ -258,7 +269,6 @@ https://i.miji.bid/2025/06/10/d018000aed9b872c7b248dccf14c4450.pngA
258
269
  """
259
270
  print(parse_url(text, for_image=True))
260
271
 
261
-
262
272
  # print(parse_url(text, for_image=False))
263
273
 
264
274
  # text = """https://photog.art/api/oss/R2yh8N Convert this portrait into a straight-on,front-facing ID-style headshot."""
@@ -266,4 +276,9 @@ https://i.miji.bid/2025/06/10/d018000aed9b872c7b248dccf14c4450.pngA
266
276
  #
267
277
  # valid_urls = parse_url(text, for_image=True)
268
278
 
269
- print(mimetypes.guess_type("xx.ico"))
279
+ print(mimetypes.guess_type("xx.ico"))
280
+
281
+ text = "这是一个示例文本,包含一个图片:![image](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQ) 这张图片很棒。"
282
+ # text = "这是一个示例文本,。"
283
+
284
+ print(parse_base64(text * 2))