MeUtils 2025.3.21.17.22.17__py3-none-any.whl → 2025.3.26.18.50.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {MeUtils-2025.3.21.17.22.17.dist-info → MeUtils-2025.3.26.18.50.26.dist-info}/METADATA +263 -263
  2. {MeUtils-2025.3.21.17.22.17.dist-info → MeUtils-2025.3.26.18.50.26.dist-info}/RECORD +36 -28
  3. examples/_openaisdk/openai_google.py +49 -0
  4. meutils/apis/common.py +12 -10
  5. meutils/apis/dashscope/__init__.py +11 -0
  6. meutils/apis/dashscope/audio.py +37 -0
  7. meutils/apis/images/recraft.py +6 -1
  8. meutils/apis/jina/common.py +3 -2
  9. meutils/apis/siliconflow/videos.py +40 -21
  10. meutils/apis/textin_apis/__init__.py +13 -0
  11. meutils/apis/textin_apis/common.py +66 -0
  12. meutils/apis/textin_apis/utils.py +42 -0
  13. meutils/apis/textin_apis/watermark_remove.py +58 -0
  14. meutils/apis/utils.py +1 -1
  15. meutils/caches/common.py +1 -0
  16. meutils/common.py +8 -0
  17. meutils/data/VERSION +1 -1
  18. meutils/data/oneapi/NOTICE.html +4 -1
  19. meutils/io/files_utils.py +4 -2
  20. meutils/io/openai_files.py +2 -5
  21. meutils/io/x.py +12 -58
  22. meutils/llm/completions/chat_gemini.py +2 -2
  23. meutils/llm/completions/chat_plus.py +24 -11
  24. meutils/llm/completions/chat_spark.py +4 -2
  25. meutils/llm/completions/yuanbao.py +12 -6
  26. meutils/notice/feishu.py +3 -0
  27. meutils/request_utils/crawler.py +3 -2
  28. meutils/schemas/image_types.py +1 -5
  29. meutils/schemas/oneapi/common.py +37 -5
  30. meutils/schemas/siliconflow_types.py +24 -2
  31. meutils/schemas/textin_types.py +64 -0
  32. meutils/serving/fastapi/exceptions/http_error.py +29 -9
  33. {MeUtils-2025.3.21.17.22.17.dist-info → MeUtils-2025.3.26.18.50.26.dist-info}/LICENSE +0 -0
  34. {MeUtils-2025.3.21.17.22.17.dist-info → MeUtils-2025.3.26.18.50.26.dist-info}/WHEEL +0 -0
  35. {MeUtils-2025.3.21.17.22.17.dist-info → MeUtils-2025.3.26.18.50.26.dist-info}/entry_points.txt +0 -0
  36. {MeUtils-2025.3.21.17.22.17.dist-info → MeUtils-2025.3.26.18.50.26.dist-info}/top_level.txt +0 -0
meutils/apis/textin_apis/watermark_remove.py ADDED
@@ -0,0 +1,58 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ # @Project : AI. @by PyCharm
+ # @File : watermark_remove
+ # @Time : 2025/3/23 11:18
+ # @Author : betterme
+ # @WeChat : meutils
+ # @Software : PyCharm
+ # @Description :
+
+ from meutils.pipe import *
+ import requests
+ import json
+
+
+ def get_file_content(filePath):
+     with open(filePath, 'rb') as fp:
+         return fp.read()
+
+
+ class CommonOcr(object):
+     def __init__(self, img_path=None, is_url=False):
+         # 图像水印去除
+         self._url = 'https://api.textin.com/ai/service/v1/image/watermark_remove'
+         # 请登录后前往 “工作台-账号设置-开发者信息” 查看 x-ti-app-id
+         # 示例代码中 x-ti-app-id 非真实数据
+         self._app_id = 'c81f*************************e9ff'
+         # 请登录后前往 “工作台-账号设置-开发者信息” 查看 x-ti-secret-code
+         # 示例代码中 x-ti-secret-code 非真实数据
+         self._secret_code = '5508***********************1c17'
+         self._img_path = img_path
+         self._is_url = is_url
+
+     def recognize(self):
+         head = {}
+         try:
+             head['x-ti-app-id'] = self._app_id
+             head['x-ti-secret-code'] = self._secret_code
+             if self._is_url:
+                 head['Content-Type'] = 'text/plain'
+                 body = self._img_path
+             else:
+                 image = get_file_content(self._img_path)
+                 head['Content-Type'] = 'application/octet-stream'
+                 body = image
+             result = requests.post(self._url, data=body, headers=head)
+             return result.text
+         except Exception as e:
+             return e
+
+
+ if __name__ == "__main__":
+     # 示例 1:传输文件
+     response = CommonOcr(img_path=r'example.jpg')
+     print(response.recognize())
+     # 示例 2:传输 URL
+     response = CommonOcr(img_path='http://example.com/example.jpg', is_url=True)
+     print(response.recognize())
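For reference, a minimal async sketch of the same TextIn watermark-removal call using httpx rather than the synchronous requests client above; the endpoint and x-ti-* headers come from the code in this diff, while the function name and credentials are placeholders:

    import httpx

    async def watermark_remove(image, app_id: str, secret_code: str) -> str:
        # image may be a URL (str) or raw bytes, mirroring CommonOcr.recognize()
        headers = {"x-ti-app-id": app_id, "x-ti-secret-code": secret_code}
        headers["Content-Type"] = "text/plain" if isinstance(image, str) else "application/octet-stream"
        async with httpx.AsyncClient() as client:
            resp = await client.post(
                "https://api.textin.com/ai/service/v1/image/watermark_remove",
                content=image,
                headers=headers,
            )
            return resp.text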
meutils/apis/utils.py CHANGED
@@ -6,7 +6,7 @@
  # @Author : betterme
  # @WeChat : meutils
  # @Software : PyCharm
- # @Description :
+ # @Description : textinparsex

  from openai import AsyncClient
  from meutils.pipe import *
meutils/caches/common.py CHANGED
@@ -41,6 +41,7 @@ def rcache(**kwargs):
      return cached(
          cache=RedisCache,
          **connection_kwargs,
+
          **kwargs
      )

meutils/common.py CHANGED
@@ -455,6 +455,14 @@ def obj_to_dict(obj):


  def dict_to_model(data: Dict[str, Any], model_name: str = 'DynamicModel'):
+     s = f"""class {model_name}(BaseModel):"""
+     for k, v in data.items():
+         _type = type(v).__name__
+         if isinstance(_type, str):
+             v = f"'{v}'"
+         s += f"\n\t{k}: {_type} = {v}"
+     print(s)
+
      from pydantic import create_model

      # 动态创建模型类
meutils/data/VERSION CHANGED
@@ -1 +1 @@
- 2025.03.21.17.22.17
+ 2025.03.26.18.50.26
meutils/data/oneapi/NOTICE.html CHANGED
@@ -7,6 +7,9 @@
  </head>
  <body>
  <!-- <h1>Embedded Webpage using iframe</h1>-->
- <iframe src="https://api-notice.chatfire.cn" width="100%" height="600px" frameborder="0"></iframe>
+ <!-- <iframe src="https://api-notice.chatfire.cn" width="100%" height="600px" frameborder="0"></iframe>-->
+
+ <iframe src="https://api.chatfire.cn/docs/5141179m0" width="100%" height="600px" frameborder="0"></iframe>
+
  </body>
  </html>
meutils/io/files_utils.py CHANGED
@@ -256,8 +256,10 @@ if __name__ == '__main__':
      # content_type=None))

      # arun(to_url_fal(url))
-     # print(guess_mime_type("base64xxxxxxxxxxxxxxxxxx.png"))
+     print(guess_mime_type("base64xxxxxxxxxxxxxxxxxx.mp4"))

      # arun(to_url(Path('img_1.png').read_bytes()))

-     arun(markdown_base64_to_url("![Image_0](data:image)"))
+     # arun(markdown_base64_to_url("![Image_0](data:image)"))
+
+
meutils/io/openai_files.py CHANGED
@@ -7,10 +7,6 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description : https://bigmodel.cn/dev/activities/freebie/fileextracion
- import asyncio
-
- import httpx
- import shortuuid

  from meutils.pipe import *
  from meutils.io.files_utils import to_bytes, guess_mime_type
@@ -154,7 +150,8 @@ if __name__ == '__main__':

      with timer():
          file = "https://top.baidu.com/board?tab=realtime"
-         file = "https://oss.ffire.cc/files/百炼系列手机产品介绍.docx"
+         # file = "https://oss.ffire.cc/files/百炼系列手机产品介绍.docx"
+         # file = "https://app.yinxiang.com/fx/8b8bba1e-b254-40ff-81e1-fa3427429efe"

          arun(file_extract(file))

meutils/io/x.py CHANGED
@@ -11,65 +11,19 @@
  from meutils.pipe import *
  # from minio import Minio
  from meutils.oss.minio_oss import Minio
+ import os
+ from openai import OpenAI

- # MinIO client setup
- # minio_client = Minio(
- # "minio-server-url:9000",
- # access_key="your-access-key",
- # secret_key="your-secret-key",
- # secure=True # set to False if not using HTTPS
- # )
- minio_client = Minio()
-
-
- async def download_and_upload(video_url, bucket_name, object_name):
-     buffer_size = 5 * 1024 * 1024  # 5MB buffer to meet MinIO's minimum part size
-
-     async with httpx.AsyncClient() as client:
-         try:
-             async with client.stream("GET", video_url) as response:
-                 response.raise_for_status()
-
-                 total_size = int(response.headers.get('content-length', 0))
-                 buffer = io.BytesIO()
-                 uploaded = 0
-
-                 async for chunk in response.aiter_bytes(chunk_size=buffer_size):
-                     buffer.write(chunk)
-                     buffer_size = buffer.tell()
-                     buffer.seek(0)
-
-                     if buffer_size >= 5 * 1024 * 1024 or response.is_closed:
-                         try:
-                             minio_client.put_object(
-                                 bucket_name,
-                                 object_name,
-                                 buffer,
-                                 length=buffer_size,
-                                 part_size=5 * 1024 * 1024,
-                                 content_type='video/mp4'
-                             )
-                             uploaded += buffer_size
-                             print(f"Uploaded {uploaded}/{total_size} bytes")
-
-                         except Exception as upload_error:
-                             print(f"Unexpected upload error: {upload_error}")
-                             raise
-
-                         buffer = io.BytesIO()  # Reset buffer after upload
-
-                 print("Upload completed")
-         except httpx.HTTPStatusError as http_error:
-             print(f"HTTP error occurred: {http_error}")
-         except Exception as e:
-             print(f"An unexpected error occurred: {e}")
+ client = OpenAI(
+     api_key=os.getenv("DASHSCOPE_API_KEY"),
+     base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+ )

+ file = client.files.create(
+     file=open("/Users/betterme/PycharmProjects/AI/docker-compose.yml", "rb"),
+     purpose='file-extract'
+ )
+ client.files.content(file.id)

  if __name__ == '__main__':
-     # Usage
-     url = "https://s22-def.ap4r.com/bs2/upload-ylab-stunt-sgp/se/ai_portal_sgp_queue_m2v_txt2video_camera/b7eded0c-452c-4282-ad0a-02d96bd97f3e/0.mp4"
-     bucket_name = "videos"
-     object_name = "video11.mp4"
-
-     with timer():
-         arun(download_and_upload(url, bucket_name, object_name))
+     print(file)
meutils/llm/completions/chat_gemini.py CHANGED
@@ -69,7 +69,7 @@ class Completions(object):
  message["content"] += [{"type": "text", "text": request.last_user_content}]

  # 调用模型
- logger.debug(request.model_dump_json(indent=4))
+ # logger.debug(request.model_dump_json(indent=4))

  data = to_openai_params(request)
  response = await self.client.chat.completions.create(**data)
@@ -97,7 +97,7 @@ class Completions(object):
      yield chunk
  else:
      yield chunk
- logger.debug(str(chunk))
+ # logger.debug(str(chunk))


  if __name__ == '__main__':
meutils/llm/completions/chat_plus.py CHANGED
@@ -46,6 +46,8 @@ class Completions(object):
  :param request:
  :return:
  """
+ request.model = request.model.removesuffix("-all").removesuffix("-plus")  ### 避免死循环
+
  if request.last_user_content.startswith(("画",)):  # 画画
      return await self.create_images(request)  # str

@@ -58,7 +60,9 @@ class Completions(object):
      return metaso.create(request)

  elif request.last_user_content.startswith(("http",)):
-     file_url, text = request.last_user_content.split(maxsplit=1)  # application/octet-stream
+
+     file_url, *texts = request.last_user_content.split(maxsplit=1) + ["总结下"]  # application/octet-stream
+     text = texts[0]

      if guess_mime_type(file_url).startswith("image"):  # 识图
@@ -81,7 +85,7 @@ class Completions(object):
  }
  ]
  data = to_openai_params(request)
- return await self.client.chat.completions.create(**data)
+ return await zhipuai_client.chat.completions.create(**data)

  elif guess_mime_type(file_url).startswith(("video", "audio")):  # 音频 视频
      request.model = "gemini"  # 果果
@@ -106,26 +110,31 @@ class Completions(object):
  return await self.client.chat.completions.create(**data)

  else:
-
+     # logger.debug(f"file_url: {file_url}")
      file_content = await file_extract(file_url)  # 文件问答-单轮

+     logger.debug(file_content)
+
      request.messages = [
          {
              'role': 'user',
              'content': f"""{json.dumps(file_content, ensure_ascii=False)}\n\n{text}"""
          }
      ]
+     # logger.debug(request)
      data = to_openai_params(request)
      return await self.client.chat.completions.create(**data)

  if image_urls := request.last_urls.get("image_url"):  # 识图
      request.model = "glm-4v-flash"
      data = to_openai_params(request)
-     return await self.client.chat.completions.create(**data)
+     return await zhipuai_client.chat.completions.create(**data)

  elif file_urls := request.last_urls.get("file_url"):
      return await self.chat_files(request)

+ # todo 标准格式的audio_url video_url
+
  data = to_openai_params(request)
  return await self.client.chat.completions.create(**data)

@@ -189,15 +198,19 @@ if __name__ == '__main__':
  'content': [
      {
          "type": "text",
-         "text": "总结下"
+         # "text": "总结下"
+         "text": "https://app.yinxiang.com/fx/8b8bba1e-b254-40ff-81e1-fa3427429efe 总结下",
+
+         # "text": "https://app.yinxiang.com/fx/8b8bba1e-b254-40ff-81e1-fa3427429efe"
+
      },

-     {
-         "type": "file_url",
-         "file_url": {
-             "url": "https://oss.ffire.cc/files/招标文件备案表(第二次).pdf"
-         }
-     }
+     # {
+     #     "type": "file_url",
+     #     "file_url": {
+     #         "url": "https://oss.ffire.cc/files/招标文件备案表(第二次).pdf"
+     #     }
+     # }
  ]
  }])

meutils/llm/completions/chat_spark.py CHANGED
@@ -26,13 +26,15 @@ class Completions(object):

  async def create(self, request: CompletionRequest):
      if request.last_user_content.startswith("http"):  # 文件问答-单轮
-         file_url, content = request.last_user_content.split(maxsplit=1)
+         file_url, *texts = request.last_user_content.split(maxsplit=1) + ["总结下"]
+         text = texts[0]
+
          file_content = await file_extract(file_url)

          request.messages = [
              {
                  'role': 'user',
-                 'content': f"""{json.dumps(file_content, ensure_ascii=False)}\n\n{content}"""
+                 'content': f"""{json.dumps(file_content, ensure_ascii=False)}\n\n{text}"""
              }
          ]
      elif image_urls := request.last_urls.get("image_url"):  # 长度为1
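Both chat_plus.py and chat_spark.py above now share the same fallback when a message contains only a URL; a minimal sketch of that split, with a hypothetical PDF URL:

    content = "https://example.com/report.pdf"                # user message is just a URL
    file_url, *texts = content.split(maxsplit=1) + ["总结下"]   # always append the default prompt
    text = texts[0]                                            # "总结下" unless the user supplied their own text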
meutils/llm/completions/yuanbao.py CHANGED
@@ -7,9 +7,7 @@
  # @WeChat : meutils
  # @Software : PyCharm
  # @Description :
- import asyncio

- import pandas as pd
  from aiostream import stream

  from meutils.pipe import *
@@ -52,6 +50,9 @@ class Completions(object):

  prompt = request and oneturn2multiturn(request.messages) or image_request.prompt

+ if isinstance(prompt, list):
+     prompt = prompt[-1].get("text", "")  # [{'type': 'text', 'text': 'hi'}]
+
  payload = {
      "model": "gpt_175B_0404",
      "chatModelId": request.model,
@@ -101,13 +102,14 @@ class Completions(object):
  }
  )

+ # logger.debug(bjson(payload))
  headers = {
      'cookie': token
  }
  async with httpx.AsyncClient(base_url=YUANBAO_BASE_URL, headers=headers, timeout=300) as client:
      # chatid = (await client.post(API_GENERATE_ID)).text
      chatid = uuid.uuid4()
-
+     # https://yuanbao.tencent.com/api/chat/90802631-22dc-4d5d-9d3f-f27f57d5fec8'
      async with client.stream(method="POST", url=f"{API_CHAT}/{chatid}", json=payload) as response:
          logger.debug(response.status_code)
          response.raise_for_status()
@@ -138,8 +140,10 @@ class Completions(object):

  for i, ref in enumerate(df['title'], 1):
      references.append(f"[^{i}]: {ref}\n")
+ if sse.content:
+     yield sse.content

- yield sse.content
+ # logger.debug(sse.content)
  if references:
      yield '\n\n'
      for ref in references:
@@ -195,13 +199,15 @@ if __name__ == '__main__':
  # model = 'deep_seek_v3-search'
  # model = 'deep_seek-search'
  model = 'deep_seek'
- model = 'hunyuan_t1'
+ # model = 'hunyuan_t1'
  model = 'hunyuan_t1-search'
+ # model = 'deep_seek-search'

  arun(Completions().create(
      CompletionRequest(
          model=model,
-         messages=[{'role': 'user', 'content': '南京天气如何'}]
+         messages=[{'role': 'user', 'content': '南京天气如何'}],
+         stream=True
      ),
      # image_request=request,
      # token=token
meutils/notice/feishu.py CHANGED
@@ -106,6 +106,9 @@ def catch(

  send_message_for_images = partial(send_message, url=IMAGES)

+ httpexception_feishu_url = "https://open.feishu.cn/open-apis/bot/v2/hook/d1c7b67d-b0f8-4067-a2f5-109f20eeb696"
+ send_message_for_httpexception = partial(send_message, url=httpexception_feishu_url)
+
  if __name__ == '__main__':
      # send_message("xxx", title=None)
      send_message(None, title=None)
meutils/request_utils/crawler.py CHANGED
@@ -56,10 +56,11 @@ if __name__ == '__main__':
  # print(Crawler(url).xpath('//*[@id="houselist"]/li/a//text()'))

  url = "https://chat.tune.app/?id=7f268d94-d2d4-4bd4-a732-f196aa20dceb"
+ url = "https://app.yinxiang.com/fx/8b8bba1e-b254-40ff-81e1-fa3427429efe"

- # print(Crawler(url).xpath('//script//text()'))
+ print(Crawler(url).xpath('//script//text()'))

- html_content = httpx.get(url).text
+ # html_content = httpx.get(url).text


  # # 正则表达式匹配以 "/_next/static/chunks/7116-" 开头的 JS 文件
meutils/schemas/image_types.py CHANGED
@@ -343,11 +343,6 @@ class RecraftImageRequest(ImageRequest):
  if self.image_type in {"natural", }:
      self.image_type = "any"

- if "halloween" in self.model:
-     self.user_controls = self.user_controls or {
-         "special_event": "halloween"
-     }
-

  class ImageProcessRequest(BaseModel):
  task: Optional[str] = None
@@ -508,3 +503,4 @@ if __name__ == '__main__':
  # print(ImagesResponse(data=[{'url': 1}]))

  print(RecraftImageRequest(prompt="").model_dump_json())
+ print(RecraftImageRequest(prompt=""))
meutils/schemas/oneapi/common.py CHANGED
@@ -20,13 +20,15 @@ MINIMAX_VIDEO = 3

  MODEL_PRICE = {
      "o1:free": FREE,
+     # "claude-3-7-sonnet-code:free": "claude-3-7-sonnet-code"
+     "claude-3-7-sonnet-code:free": 0.0001,

      "black-forest-labs/FLUX.1-dev": 0.0001,
      "black-forest-labs/FLUX.1-pro": 0.0001,

      "images": FREE,
      # rix
-     "kling_image": 0.025,
+     "kling_image": 0.05,
      "kling_virtual_try_on": 1,
      "kling_effects": 1,

@@ -66,6 +68,10 @@ MODEL_PRICE = {
      "sora-1:1-480p-5s": 1.2,
      "dall-e-3": 0.03,

+     # 智能体
+     "ppt": 0.1,
+     "ppt-islide": 0.1,
+
      # 虚拟换衣fish
      "api-kolors-virtual-try-on": 0.1,
      "official-api-kolors-virtual-try-on": 0.8,
@@ -218,8 +224,11 @@ MODEL_PRICE = {
      "api-translator": 0.0001,
      "api-voice-clone": 0.01,

+     # textin
+     "api-textin-image/watermark_remove": 0.03,
+
      # suno
-     "suno_music": 0.36,
+     "suno_music": 0.4,
      "suno_lyrics": 0.01,
      "suno_uploads": 0.01,
      "suno_upload": 0.01,
@@ -374,6 +383,7 @@ MODEL_RATIO = {
      "grok-3": 2,
      "grok-3-deepsearch": 2,
      "grok-3-reasoner": 2,
+     "grok-3-deepersearch": 2,

      # 定制
      "lingxi-all": 1,
@@ -495,9 +505,11 @@ MODEL_RATIO = {

      # deepseek
      "deepseek-v3": 1,
+     "deepseek-v3-0324": 1,
      "deepseek-v3-8k": 0.5,
      "deepseek-v3-128k": 5,
      "deepseek-chat": 1,
+
      "deepseek-chat-8k": 0.5,
      "deepseek-chat-64k": 5,
      "deepseek-chat-164k": 5,
@@ -514,8 +526,10 @@ MODEL_RATIO = {
      'deepseek-r1-search': 2,
      'deepseek-reasoner-search': 2,

-     'deepseek-r1-think': 1.5,
-     'deepseek-reasoner-think': 1.5,
+     'deepseek-r1-think': 2,
+     'deepseek-reasoner-think': 2,
+
+     "deepseek-r1-plus": 2,

      # deepseek-r1:1.5b,deepseek-r1-distill-qwen-1.5b,deepseek-r1:7b,deepseek-r1-distill-qwen-7b,deepseek-r1:8b,deepseek-r1-distill-llama-8b,deepseek-r1:14b,deepseek-r1-distill-qwen-14b,deepseek-r1:32b,deepseek-r1-distill-qwen-32b,deepseek-r1:70b,deepseek-r1-distill-llama-70b
      "deepseek-r1:1.5b": 0.1,
@@ -591,8 +605,9 @@ MODEL_RATIO = {
      "text-babbage-001": 0.25,
      "text-davinci-edit-001": 10,

+     "omni-moderation-latest": 0.1,
      "text-moderation-latest": 0.1,
-     "text-moderation-stable": 0.1,
+
      "tts-1": 7.5,
      "tts-1-1106": 7.5,
      "tts-1-hd": 15,
@@ -615,6 +630,12 @@ MODEL_RATIO = {
      "claude-3-7-sonnet-latest": 1.5,
      "claude-3-7-sonnet-20250219": 1.5,

+     "deepclaude": 1.5,
+     "deep-claude": 1.5,
+
+     "deep-gemini": 1.5,
+     "deep-grok": 1.5,
+
      "command": 0.5 * 2,
      "command-light": 0.5 * 2,
      "command-light-nightly": 0.5 * 2,
@@ -652,6 +673,7 @@ MODEL_RATIO = {

      "gemini-2.0-pro": 1.25,
      "gemini-2.0-pro-exp-02-05": 1.25,
+     "gemini-2.5-pro-exp-03-25": 1.5,

      "gemini-2.0-flash-thinking-exp": 1,
      "gemini-2.0-flash-thinking-exp-1219": 1,
@@ -780,6 +802,7 @@ COMPLETION_RATIO = {
      "grok-3": 5,
      "grok-3-deepsearch": 5,
      "grok-3-reasoner": 5,
+     "grok-3-deepersearch": 5,

      "claude-3-5-haiku-20241022": 5,
      "anthropic/claude-3-5-haiku-20241022:beta": 5,
@@ -860,6 +883,7 @@ COMPLETION_RATIO = {

      "gemini-2.0-pro": 5,
      "gemini-2.0-pro-exp-02-05": 5,
+     "gemini-2.5-pro-exp-03-25": 5,

      "gemma2-9b-it": 4,
      "gemma2-27b-it": 4,
@@ -941,7 +965,9 @@ COMPLETION_RATIO = {
      "meta-deepresearch": 4,

      "deepseek-v3": 4,
+     "deepseek-v3-0324": 4,
      "deepseek-chat": 4,
+
      'deepseek-r1': 4,
      'deepseek-reasoner': 4,
      "deepseek-reasoner-164k": 8,
@@ -961,6 +987,12 @@ COMPLETION_RATIO = {
      'deepseek-r1-search': 5,
      'deepseek-reasoner-search': 5,

+     "deepseek-r1-plus": 4,
+     "deepclaude": 4,
+     "deep-claude": 4,
+     "deep-gemini": 4,
+     "deep-grok": 4,
+
      "glm-zero": 5,
      "glm-zero-preview": 5,
      "glm-4v-flash": 5,
meutils/schemas/siliconflow_types.py CHANGED
@@ -11,6 +11,9 @@

  from meutils.pipe import *

+ FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=3aA5dH"
+ FEISHU_URL_FREE = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=xlvlrH"
+
  BASE_URL = "https://api.siliconflow.cn/v1"

  EXAMPLES = [
@@ -72,9 +75,28 @@ class RerankRequest(BaseModel):

  class VideoRequest(BaseModel):
      model: str
+
      prompt: str
-     image: Optional[str] = None
+     negative_prompt: Optional[str] = None
+
+     image: Optional[str] = None  # 图生视频
+
      seed: Optional[int] = None

+     # response_format: Optional[str] = None  # "mp4" fal
+
      class Config:
-         frozen = True
+         extra = "allow"
+
+         # frozen = True
+         # populate_by_name = True
+
+         json_schema_extra = {
+             "examples": [
+                 {
+                     "model": "Wan-AI/Wan2.1-I2V-14B-720P-Turbo",
+                     "prompt": "画条狗",
+                     "image": "https://oss.ffire.cc/files/kling_watermark.png"
+                 },
+             ]
+         }
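With Config.extra now set to "allow" (instead of frozen = True), a VideoRequest built from the bundled example can also carry provider-specific fields; a hedged usage sketch (the fps field is illustrative, not part of the schema):

    from meutils.schemas.siliconflow_types import VideoRequest

    request = VideoRequest(
        model="Wan-AI/Wan2.1-I2V-14B-720P-Turbo",
        prompt="画条狗",
        image="https://oss.ffire.cc/files/kling_watermark.png",
        fps=24,  # accepted only because extra = "allow"; hypothetical field
    )
    print(request.model_dump())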