MeUtils 2025.2.18.19.56.22__py3-none-any.whl → 2025.2.25.18.30.35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. {MeUtils-2025.2.18.19.56.22.dist-info → MeUtils-2025.2.25.18.30.35.dist-info}/METADATA +263 -263
  2. {MeUtils-2025.2.18.19.56.22.dist-info → MeUtils-2025.2.25.18.30.35.dist-info}/RECORD +49 -40
  3. examples/_openaisdk/4v.py +3 -2
  4. examples/_openaisdk/openai_chatfire.py +12 -3
  5. examples/_openaisdk/openai_files.py +11 -7
  6. examples/_openaisdk/openai_images.py +18 -13
  7. examples/_openaisdk/openai_jina.py +58 -0
  8. meutils/apis/fal/images.py +83 -19
  9. meutils/apis/fal/pd.py +13 -0
  10. meutils/apis/fal/videos.py +31 -12
  11. meutils/apis/images/recraft.py +9 -6
  12. meutils/apis/jimeng/common.py +5 -1
  13. meutils/apis/jimeng/images.py +19 -8
  14. meutils/apis/kling/api.py +1 -1
  15. meutils/apis/niutrans.py +2 -0
  16. meutils/apis/oneapi/token.py +0 -2
  17. meutils/apis/search/zhipu.py +80 -0
  18. meutils/apis/siliconflow/images.py +4 -1
  19. meutils/apis/sunoai/suno_api.py +42 -0
  20. meutils/apis/to_image/md.py +24 -2
  21. meutils/apis/translator/deeplx.py +2 -1
  22. meutils/apis/vidu/vidu_video.py +2 -1
  23. meutils/caches/acache.py +51 -7
  24. meutils/data/VERSION +1 -1
  25. meutils/data/oneapi/FOOTER.md +2 -2
  26. meutils/data/oneapi/NOTICE.md +1 -151
  27. meutils/data/oneapi/_NOTICE.md +140 -0
  28. meutils/decorators/contextmanagers.py +47 -4
  29. meutils/files/__init__.py +11 -0
  30. meutils/files/qwen_files.py +30 -0
  31. meutils/io/files_utils.py +2 -2
  32. meutils/llm/check_utils.py +2 -1
  33. meutils/llm/clients.py +5 -2
  34. meutils/llm/completions/qwenllm.py +45 -5
  35. meutils/llm/prompts/search_prompts.py +18 -0
  36. meutils/schemas/image_types.py +4 -1
  37. meutils/schemas/oneapi/common.py +35 -7
  38. meutils/schemas/openai_types.py +4 -3
  39. meutils/schemas/suno_types.py +1 -1
  40. meutils/schemas/task_types.py +1 -0
  41. meutils/schemas/vidu_types.py +18 -4
  42. meutils/serving/fastapi/dependencies/auth.py +8 -2
  43. meutils/serving/fastapi/dependencies/headers.py +31 -0
  44. meutils/str_utils/json_utils.py +1 -0
  45. meutils/str_utils/regular_expression.py +7 -2
  46. {MeUtils-2025.2.18.19.56.22.dist-info → MeUtils-2025.2.25.18.30.35.dist-info}/LICENSE +0 -0
  47. {MeUtils-2025.2.18.19.56.22.dist-info → MeUtils-2025.2.25.18.30.35.dist-info}/WHEEL +0 -0
  48. {MeUtils-2025.2.18.19.56.22.dist-info → MeUtils-2025.2.25.18.30.35.dist-info}/entry_points.txt +0 -0
  49. {MeUtils-2025.2.18.19.56.22.dist-info → MeUtils-2025.2.25.18.30.35.dist-info}/top_level.txt +0 -0
@@ -7,6 +7,12 @@
7
7
  # @WeChat : meutils
8
8
  # @Software : PyCharm
9
9
  # @Description :
10
+ """
11
+ File "/usr/local/lib/python3.10/site-packages/meutils/llm/completions/qwenllm.py", line 47, in create
12
+ yield response.choices[0].message.content
13
+ AttributeError: 'str' object has no attribute 'choices'
14
+
15
+ """
10
16
 
11
17
  from openai import AsyncOpenAI
12
18
 
@@ -32,6 +38,16 @@ async def create(request: ChatCompletionRequest):
32
38
  base_url=base_url, api_key=token,
33
39
  default_headers={'User-Agent': ua.random}
34
40
  )
41
+
42
+ # qwen结构
43
+ if any(i in request.model.lower() for i in ("search",)):
44
+ request.model = "qwen-plus-latest"
45
+ request.messages[-1]['chat_type'] = "search"
46
+
47
+ if any(i in request.model.lower() for i in ("qwq", "think")):
48
+ request.model = "qwen-plus-latest"
49
+ request.messages[-1]['feature_config'] = {"thinking_enabled": True}
50
+
35
51
  data = to_openai_params(request)
36
52
 
37
53
  if request.stream:
@@ -43,8 +59,10 @@ async def create(request: ChatCompletionRequest):
43
59
 
44
60
  else:
45
61
  response = await client.chat.completions.create(**data)
46
- # logger.info(response)
47
- yield response.choices[0].message.content
62
+ if isinstance(response, str):
63
+ logger.error(response)
64
+ else:
65
+ yield response.choices[0].message.content
48
66
 
49
67
 
50
68
  if __name__ == '__main__':
@@ -60,16 +78,38 @@ if __name__ == '__main__':
60
78
  # ]
61
79
  request = ChatCompletionRequest(
62
80
  # model="qwen-turbo-2024-11-01",
63
- model="qwen-max-latest",
81
+ # model="qwen-max-latest",
82
+ model="qwen-max-latest-search",
83
+
64
84
  # model="qwen-plus-latest",
65
85
 
66
86
  messages=[
67
87
  {
68
88
  'role': 'user',
69
- 'content': 'hi'
89
+ # 'content': '今天南京天气',
90
+ # 'content': '总结下',
91
+
92
+ # "chat_type": "search",
93
+
94
+ "content": [
95
+ {
96
+ "type": "text",
97
+ "text": "总结下",
98
+ "chat_type": "t2t",
99
+ "feature_config": {
100
+ "thinking_enabled": False
101
+ }
102
+ },
103
+ {
104
+ "type": "file",
105
+ "file": "2d677df1-45b2-4f30-829f-0d42b2b07136"
106
+ }
107
+ ]
108
+
70
109
  },
71
110
 
72
111
  ],
73
- stream=False,
112
+ stream=True,
113
+
74
114
  )
75
115
  arun(create(request))
@@ -0,0 +1,18 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ # @Project : AI. @by PyCharm
4
+ # @File : search_prompt
5
+ # @Time : 2025/2/19 10:37
6
+ # @Author : betterme
7
+ # @WeChat : meutils
8
+ # @Software : PyCharm
9
+ # @Description : 提示词模板
10
+
11
+ from meutils.pipe import *
12
+
13
+ current_date = datetime.datetime.now().strftime("%Y-%m-%d")
14
+
15
+ system_prompt = f"""你是一个具备网络访问能力的智能助手,在适当情况下,优先使用网络信息(参考信息)来回答,
16
+ 以确保用户得到最新、准确的帮助。当前日期是 {current_date}。"""
17
+
18
+ # deepseek_prompt
@@ -83,7 +83,7 @@ class ImageRequest(BaseModel): # openai
83
83
 
84
84
  # oneapi
85
85
  negative_prompt: Optional[str] = None
86
- guidance: Optional[int] = None
86
+ guidance: Optional[float] = None
87
87
  steps: Optional[int] = None
88
88
 
89
89
  controls: Optional[dict] = {} # 额外参数
@@ -479,6 +479,9 @@ class ImageProcess(BaseModel):
479
479
 
480
480
  response_format: Literal["url", "b64_json"] = "url"
481
481
 
482
+ # class Config:
483
+ # extra = "allow"
484
+
482
485
 
483
486
  if __name__ == '__main__':
484
487
  # print(ASPECT_RATIOS.items())
@@ -101,8 +101,9 @@ MODEL_PRICE = {
101
101
  "api-tripo3d": 0.1,
102
102
 
103
103
  # 图片 音频 视频
104
- "recraftv3": 0.04,
105
- "chat-recraftv3": 0.04,
104
+ "recraftv3": 0.1, # 官方的
105
+ "recraft-api": 0.1,
106
+ "chat-recraftv3": 0.1,
106
107
 
107
108
  "api-asr": 0.01,
108
109
  "api-stt": 0.01,
@@ -277,6 +278,7 @@ MODEL_PRICE = {
277
278
 
278
279
  'deepseek-search': 0.01,
279
280
  'deepseek-r1-search': 0.01,
281
+ "deepseek-r1-search-pro": 0.02,
280
282
  'deepseek-reasoner-search': 0.01,
281
283
 
282
284
  'deepseek-r1-metasearch': 0.03,
@@ -334,6 +336,9 @@ MODEL_RATIO = {
334
336
  # 智能体
335
337
  "gpt-4-plus": 2.5,
336
338
  "gpt-4o-plus": 2.5,
339
+ "jina-deepsearch": 2,
340
+ "deepresearch": 2,
341
+ "deepsearch": 2,
337
342
 
338
343
  # embedding & rerank
339
344
  "rerank-multilingual-v2.0": 0.1,
@@ -364,9 +369,13 @@ MODEL_RATIO = {
364
369
  "claude-3-5-haiku-20241022": 0.5,
365
370
  "anthropic/claude-3-5-haiku-20241022:beta": 0.5,
366
371
 
372
+ # grok
367
373
  "grok-2": 1,
368
374
  "grok-2-1212": 1,
369
375
  "grok-2-vision-1212": 1,
376
+ "grok-3": 2,
377
+ "grok-3-deepsearch": 2,
378
+ "grok-3-reasoner": 2,
370
379
 
371
380
  # 定制
372
381
  "lingxi-all": 1,
@@ -412,7 +421,7 @@ MODEL_RATIO = {
412
421
  "qwen2.5-coder-7b-instruct": 0.05,
413
422
  "qwen2.5-7b-instruct": 0.05,
414
423
  "qwen2.5-14b-instruct": 0.25,
415
- "qwen2.5-32b-instruct": 0.5,
424
+ "qwen2.5-32b-instruct": 1,
416
425
  "qwen2.5-72b-instruct": 2,
417
426
  "qwen2.5-math-72b-instruct": 2,
418
427
  "qwen2.5-coder-32b-instruct": 0.5,
@@ -568,7 +577,7 @@ MODEL_RATIO = {
568
577
  "ERNIE-3.5-128K": 4,
569
578
 
570
579
  "ERNIE-4.0-Turbo-8K": 15,
571
- "ERNIE-4.0-8K": 20,
580
+ "ERNIE-4.0-8K": 10,
572
581
 
573
582
  "text-ada-001": 0.2,
574
583
  "text-babbage-001": 0.25,
@@ -594,6 +603,10 @@ MODEL_RATIO = {
594
603
  "anthropic/claude-3.5-sonnet": 1.5,
595
604
  "anthropic/claude-3.5-sonnet:beta": 4, # 1022
596
605
 
606
+ "claude-3-7-sonnet-thinking": 1.5,
607
+ "claude-3-7-sonnet-latest": 1.5,
608
+ "claude-3-7-sonnet-20250219": 1.5,
609
+
597
610
  "command": 0.5 * 2,
598
611
  "command-light": 0.5 * 2,
599
612
  "command-light-nightly": 0.5 * 2,
@@ -622,9 +635,9 @@ MODEL_RATIO = {
622
635
  "google/gemini-flash-1.5-exp": 0.1, # openrouter免费
623
636
  "google/gemini-flash-1.5-8b-exp": 0.1, # openrouter免费
624
637
 
625
- "gemini-2.0-flash": 0.075,
626
- "gemini-2.0-flash-001": 0.075,
627
- "gemini-2.0-flash-lite-preview-02-05": 0.075,
638
+ "gemini-2.0-flash": 0.0625,
639
+ "gemini-2.0-flash-001": 0.0625,
640
+ "gemini-2.0-flash-lite-preview-02-05": 0.0625,
628
641
 
629
642
  "gemini-2.0-pro": 1.25,
630
643
  "gemini-2.0-pro-exp-02-05": 1.25,
@@ -735,17 +748,26 @@ MODEL_RATIO = {
735
748
  "ep-20240515073409-dlpqp": 5,
736
749
  "microsoft/phi-4": 0.035 * 5,
737
750
 
751
+ "meta-llama/Llama-3.2-11B-Vision-Instruct": 0.1,
752
+
738
753
  }
739
754
 
740
755
  COMPLETION_RATIO = {
741
756
  # 智能体
742
757
  "gpt-4-plus": 5,
743
758
  "gpt-4o-plus": 5,
759
+ "jina-deepsearch": 4,
760
+ "deepresearch": 4,
761
+ "deepsearch": 4,
744
762
 
745
763
  "grok-2": 5,
746
764
  "grok-2-1212": 5,
747
765
  "grok-2-vision-1212": 5,
748
766
 
767
+ "grok-3": 5,
768
+ "grok-3-deepsearch": 5,
769
+ "grok-3-reasoner": 5,
770
+
749
771
  "claude-3-5-haiku-20241022": 5,
750
772
  "anthropic/claude-3-5-haiku-20241022:beta": 5,
751
773
 
@@ -770,6 +792,10 @@ COMPLETION_RATIO = {
770
792
  "anthropic/claude-3.5-sonnet": 5,
771
793
  "anthropic/claude-3.5-sonnet:beta": 5,
772
794
 
795
+ "claude-3-7-sonnet-think": 5,
796
+ "claude-3-7-sonnet-latest": 5,
797
+ "claude-3-7-sonnet-20250219": 5,
798
+
773
799
  "llama-3.1-70b-instruct": 2,
774
800
  "meta-llama/Meta-Llama-3.1-70B-Instruct": 2,
775
801
 
@@ -915,6 +941,8 @@ COMPLETION_RATIO = {
915
941
  "step-1.5v-mini": 5,
916
942
  "step-1o-vision-32k": 5,
917
943
 
944
+ "meta-llama/Llama-3.2-11B-Vision-Instruct": 4
945
+
918
946
  }
919
947
 
920
948
  REDIRECT_MODEL = {
@@ -170,8 +170,10 @@ class ChatCompletionRequest(BaseModel):
170
170
  if self.max_tokens:
171
171
  self.max_tokens = min(self.max_tokens, 4096)
172
172
 
173
- model_config = {
174
- "json_schema_extra": {
173
+ class Config:
174
+ extra = "allow"
175
+
176
+ json_schema_extra = {
175
177
  "examples": [
176
178
  {
177
179
  "model": "gpt-3.5-turbo",
@@ -222,7 +224,6 @@ class ChatCompletionRequest(BaseModel):
222
224
 
223
225
  ]
224
226
  }
225
- }
226
227
 
227
228
 
228
229
  class ImageRequest(BaseModel):
@@ -77,7 +77,7 @@ class SunoAIRequest(BaseModel): # 原始请求体
77
77
 
78
78
  make_instrumental: bool = False
79
79
 
80
- mv: str = "chirp-v3-5" # chirp-v3-5-tau
80
+ mv: str = "chirp-v4" # chirp-v3-5-tau
81
81
  generation_type: str = "TEXT"
82
82
 
83
83
  task: Optional[str] = None # "cover"
@@ -105,6 +105,7 @@ class TaskType(str, Enum):
105
105
  file_extract = "file-extract"
106
106
  moonshot_fileparser = "moonshot-fileparser"
107
107
  textin_fileparser = "textin-fileparser"
108
+ qwen = "qwen"
108
109
 
109
110
  watermark_remove = "watermark-remove"
110
111
 
@@ -92,10 +92,14 @@ class VideoRequest(BaseModel):
92
92
 
93
93
  class ViduRequest(BaseModel):
94
94
  """quality 倍率2"""
95
- model: Union[str, Literal['vidu-1.5', 'vidu-high-performance', 'vidu-high-quality']] = "vidu-high-performance"
95
+ model: Union[
96
+ str, Literal['vidu-2.0', 'vidu-1.5', 'vidu-high-performance', 'vidu-high-quality']] = "vidu-high-performance"
96
97
 
97
98
  prompt: Optional[str] = None
99
+
98
100
  url: Optional[str] = None # ssupload:?id=
101
+ tail_image_url: Optional[str] = None
102
+
99
103
  style: str = "general" # anime
100
104
  aspect_ratio: str = "16:9"
101
105
  duration: int = 4
@@ -103,7 +107,7 @@ class ViduRequest(BaseModel):
103
107
  type: Optional[str] = None # text2video img2video character2video headtailimg2video
104
108
 
105
109
  """vidu-1.5"""
106
- resolution: Literal['512', '720p', 'vidu-high-quality'] = "512"
110
+ resolution: Union[str, Literal['512', '720p', '1080p']] = "512"
107
111
  movement_amplitude: Optional[str] = "auto" # small medium high
108
112
 
109
113
  sample_count: int = 1
@@ -143,12 +147,22 @@ class ViduRequest(BaseModel):
143
147
  }
144
148
  )
145
149
 
146
- if self.model in ("vidu-1.5", "vidu-2.0"):
150
+ if self.tail_image_url:
151
+ type = "headtailimg2video"
152
+ input['prompts'].append(
153
+ {
154
+ "type": "image",
155
+ "content": self.tail_image_url,
156
+ "src_imgs": [self.tail_image_url, ]
157
+ }
158
+ )
159
+
160
+ if self.model in ("vidu-2.0",):
147
161
  self.payload = {
148
162
  "input": input,
149
163
  "type": self.type or type,
150
164
  "settings": {
151
- "model_version": "2.0", #######
165
+ "model_version": "2.0", #######
152
166
  "style": "general",
153
167
  "duration": self.duration,
154
168
 
@@ -14,6 +14,8 @@ from typing import Optional, Union
14
14
  from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
15
15
  from fastapi import Depends, HTTPException, status
16
16
 
17
+ from meutils.config_utils.lark_utils import get_series, get_next_token
18
+
17
19
  http_bearer = HTTPBearer()
18
20
 
19
21
 
@@ -32,9 +34,13 @@ async def get_bearer_token(
32
34
 
33
35
  token = auth.credentials
34
36
  if token.startswith('redis:'): # 初始化吧,太长?
35
- pass
36
- if ',' in token: # 初始化redis
37
+ if "feishu.cn" in token:
38
+ feishu_url = token.removeprefix("redis:")
39
+ token = await get_next_token(feishu_url) # 初始化redis
40
+
41
+ elif ',' in token: # todo: 初始化redis
37
42
  pass
43
+
38
44
  elif ',' in token: # 分隔符
39
45
  token = np.random.choice(token.split(','))
40
46
 
@@ -0,0 +1,31 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ # @Project : AI. @by PyCharm
4
+ # @File : headers
5
+ # @Time : 2025/2/23 00:20
6
+ # @Author : betterme
7
+ # @WeChat : meutils
8
+ # @Software : PyCharm
9
+ # @Description :
10
+
11
+
12
+ from fastapi import FastAPI, Request, Depends, HTTPException
13
+ from typing import Dict, Optional
14
+
15
+
16
+ # 定义一个依赖函数来获取所有请求头
17
+ # def get_headers(request: Request) -> Dict[str, str]:
18
+ # return dict(request.headers)
19
+
20
+ def get_headers(request: Request):
21
+ return request.headers
22
+
23
+ # lambda request: dict(request.headers)
24
+ # @app.get("/headers/")
25
+ # async def read_headers(headers: Dict[str, str] = Depends(get_headers)):
26
+ # # 在这里你可以使用 headers 字典
27
+ # if "upstream_api_key" not in headers:
28
+ # raise HTTPException(status_code=400, detail="API key is required")
29
+ #
30
+ # # 返回所有请求头
31
+ # return {"headers": headers}
@@ -52,6 +52,7 @@ def json_loads(s):
52
52
 
53
53
 
54
54
  def json_path(obj, expr): # todo: 缓存
55
+ """$..["keywords","query","search_result"]"""
55
56
  if isinstance(obj, dict):
56
57
  pass
57
58
  elif isinstance(obj, str):
@@ -145,9 +145,14 @@ if __name__ == '__main__':
145
145
  print(parse_url(text, for_image=False))
146
146
 
147
147
  d = {"url": "https://mj101-1317487292.cos.ap-shanghai.myqcloud.com/ai/test.pdf\n\n总结下"}
148
- print(parse_url(str(d)))
148
+ # print(parse_url(str(d)))
149
149
 
150
- print('https://mj101-1317487292.cos.ap-shanghai.myqcloud.com/ai/test.pdf\\n\\n'.strip(r"\n"))
150
+
151
+ text = "https://sc-maas.oss-cn-shanghai.aliyuncs.com/outputs/bb305b60-d258-4542-8b07-5ced549e9896_0.png?OSSAccessKeyId=LTAI5tQnPSzwAnR8NmMzoQq4&Expires=1739948468&Signature=NAswPSXj4AGghDuoNX5rVFIidcs%3D 笑起来"
152
+
153
+ print(parse_url(text))
154
+
155
+ # print('https://mj101-1317487292.cos.ap-shanghai.myqcloud.com/ai/test.pdf\\n\\n'.strip(r"\n"))
151
156
 
152
157
  # print(parse_url("http://154.3.0.117:39666/docs#/default/get_content_preview_spider_playwright_get"))
153
158