MeUtils 2025.2.6.20.41.23__py3-none-any.whl → 2025.2.13.20.50.5__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (37)
  1. {MeUtils-2025.2.6.20.41.23.dist-info → MeUtils-2025.2.13.20.50.5.dist-info}/METADATA +25 -25
  2. {MeUtils-2025.2.6.20.41.23.dist-info → MeUtils-2025.2.13.20.50.5.dist-info}/RECORD +37 -31
  3. examples/_openaisdk/openai_chatfire.py +20 -8
  4. examples/_openaisdk/openai_kindo.py +32 -0
  5. examples/_openaisdk/openai_modelscope.py +1 -1
  6. examples/_openaisdk/openai_v.py +38 -0
  7. examples/_openaisdk/openai_x.py +22 -1
  8. examples/_openaisdk/testDeepseek.py +67 -0
  9. meutils/apis/images/edits.py +6 -5
  10. meutils/apis/jimeng/images.py +8 -8
  11. meutils/apis/search/metaso.py +6 -19
  12. meutils/apis/vidu/vidu_video.py +5 -7
  13. meutils/caches/acache.py +10 -10
  14. meutils/caches/c.py +29 -0
  15. meutils/caches/redis_.py +26 -0
  16. meutils/caches/redis_cache.py +0 -2
  17. meutils/caches/redis_mulit.py +30 -0
  18. meutils/data/VERSION +1 -1
  19. meutils/data/oneapi/NOTICE.md +11 -1
  20. meutils/hash_utils.py +4 -0
  21. meutils/llm/clients.py +19 -7
  22. meutils/llm/completions/agents/search.py +18 -7
  23. meutils/llm/completions/reasoner.py +20 -11
  24. meutils/llm/completions/yuanbao.py +37 -28
  25. meutils/llm/openai_utils/common.py +2 -2
  26. meutils/oss/minio_oss.py +2 -0
  27. meutils/schemas/image_types.py +1 -1
  28. meutils/schemas/metaso_types.py +16 -5
  29. meutils/schemas/oneapi/common.py +102 -25
  30. meutils/schemas/openai_types.py +19 -15
  31. meutils/schemas/vidu_types.py +2 -1
  32. meutils/schemas/yuanbao_types.py +8 -0
  33. meutils/serving/fastapi/exceptions/http_error.py +2 -2
  34. {MeUtils-2025.2.6.20.41.23.dist-info → MeUtils-2025.2.13.20.50.5.dist-info}/LICENSE +0 -0
  35. {MeUtils-2025.2.6.20.41.23.dist-info → MeUtils-2025.2.13.20.50.5.dist-info}/WHEEL +0 -0
  36. {MeUtils-2025.2.6.20.41.23.dist-info → MeUtils-2025.2.13.20.50.5.dist-info}/entry_points.txt +0 -0
  37. {MeUtils-2025.2.6.20.41.23.dist-info → MeUtils-2025.2.13.20.50.5.dist-info}/top_level.txt +0 -0
meutils/schemas/oneapi/common.py

@@ -18,6 +18,11 @@ MJ_RELAX = 1
 STEP = 2
 
 MODEL_PRICE = {
+    "o1:free": FREE,
+
+    "black-forest-labs/FLUX.1-dev": 0.0001,
+    "black-forest-labs/FLUX.1-pro": 0.0001,
+
     "images": FREE,
     # rix
     "kling_image": 0.025,
@@ -34,6 +39,11 @@ MODEL_PRICE = {
     "minimax_i2v-01-live": 1.2,
     "minimax_s2v-01": 1.2,
 
+    # free
+    "google/gemini-2.0-flash-thinking-exp:free": 0.00001,
+    "google/gemini-2.0-flash-lite-preview-02-05:free": 0.00001,
+    "google/gemini-2.0-pro-exp-02-05:free": 0.00001,
+
     # chatfire
     "ppu-0001": 0.0001,
     "ppu-001": 0.001,
@@ -45,6 +55,7 @@ MODEL_PRICE = {
     "chatfire-law": 0.01,
 
     "sora-1:1-480p-5s": 1.2,
+    "dall-e-3": 0.03,
 
     # virtual try-on (fish)
     "api-kolors-virtual-try-on": 0.1,
@@ -199,6 +210,12 @@ MODEL_PRICE = {
     "api-cogvideox": 0.1,
     "api-cogvideox-vip": 0.4,
 
+    #
+    "runway_video": 0.6,
+    "runway_video2video": 0.6,
+    "runway_act_one": 1,
+    "runwayml_image_to_video": 0.8,
+
     "api-runwayml-gen3": 0.1,
 
     "api-translator": 0.0001,
@@ -212,7 +229,7 @@ MODEL_PRICE = {
 
     # all
     "o1-plus": 0.2,
-    "o1-pro": 0.6,
+    "o1-pro": 1.2,
 
     "o1-mini-all": 0.2,
     "o1-preview-all": 0.6,
@@ -261,6 +278,13 @@ MODEL_PRICE = {
     "ai-search-pro": 0.1,
     "ai-search-pro:scholar": 0.1,
 
+    'deepseek-search': 0.01,
+    'deepseek-r1-search': 0.01,
+    'deepseek-reasoner-search': 0.01,
+
+    'deepseek-r1-metasearch': 0.03,
+    'deepseek-reasoner-metasearch': 0.03,
+
     # MJ
     "mj-chat": 0.3,
 
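The MODEL_PRICE additions above (o1:free, the FLUX.1 endpoints, dall-e-3, the runway_* entries, and the deepseek search/metasearch routes) are flat per-request prices rather than token ratios. Below is a minimal, hypothetical sketch of consulting such a table, assuming FREE is a zero-cost sentinel and that models absent from the table fall back to token-based billing; the actual MeUtils/one-api accounting code is not shown in this diff and may differ.

# Hypothetical sketch only: assumes MODEL_PRICE maps model name -> flat price per call,
# with FREE = 0 as a sentinel; the real lookup in the package may differ.
from typing import Optional

FREE = 0

MODEL_PRICE = {
    "o1:free": FREE,
    "black-forest-labs/FLUX.1-dev": 0.0001,
    "dall-e-3": 0.03,
    "deepseek-r1-metasearch": 0.03,
}

def flat_price(model: str) -> Optional[float]:
    """Return the per-request price if the model is price-billed, else None."""
    return MODEL_PRICE.get(model)

print(flat_price("dall-e-3"))   # 0.03
print(flat_price("gpt-4o"))     # None -> token-based billing applies instead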
@@ -379,7 +403,7 @@ MODEL_RATIO = {
     "qwen-long": 0.25,
     "qwen-turbo": 0.05,
     "qwen-plus": 2,
-    "qwen-max": 10,
+    "qwen-max": 1.2,
     "qwen-max-longcontext": 20,
     "qwen-turbo-2024-11-01": 0.15,
     "qwen-max-latest": 1.2,
@@ -399,6 +423,7 @@ MODEL_RATIO = {
     "Qwen/QwQ-32B-Preview": 1,
     "qwq-32b-preview": 1,
     "qvq-72b-preview": 2,
+    "qwen2.5-vl-72b-instruct": 1.25,
 
     "qwen1.5-7b-chat": 0.05,  # special price
     "qwen1.5-14b-chat": 0.7,
@@ -461,29 +486,42 @@ MODEL_RATIO = {
     "abab5.5s-chat": 2.5,
 
     # deepseek
-    "deepseek-chat": 0.5,
+    "deepseek-v3": 1,
+    "deepseek-v3-128k": 5,
+    "deepseek-chat": 1,
+    "deepseek-chat-64k": 5,
+    "deepseek-chat-164k": 5,
+    "deepseek-chat:function": 4,
     "deepseek-vl2": 0.5,
-
     "deepseek-ai/deepseek-vl2": 0.5,
-    "deepseek-llm-67b-chat": 0.5,
-
-    "deepseek-chat:function": 4,
-
-    "deepseek-v3": 0.5,
-    "deepseek/deepseek-chat": 0.5,
-    "deepseek-ai/DeepSeek-V3": 0.5,
-    "accounts/fireworks/models/deepseek-v3": 0.5,
 
+    # deepseek-r1:1.5b,deepseek-r1-distill-qwen-1.5b,deepseek-r1:7b,deepseek-r1-distill-qwen-7b,deepseek-r1:8b,deepseek-r1-distill-llama-8b,deepseek-r1:14b,deepseek-r1-distill-qwen-14b,deepseek-r1:32b,deepseek-r1-distill-qwen-32b,deepseek-r1:70b,deepseek-r1-distill-llama-70b
     "deepseek-r1:1.5b": 0.1,
+    'deepseek-r1-lite': 0.1,  # think
+    "deepseek-r1-distill-qwen-1.5b": 0.1,
     "deepseek-r1:7b": 0.2,
+    "deepseek-r1-distill-qwen-7b": 0.2,
+    "deepseek-r1:8b": 0.3,
+    "deepseek-r1-distill-llama-8b": 0.3,
+
     "deepseek-r1:14b": 0.5,
+    "deepseek-r1-distill-qwen-14b": 0.5,
+
     "deepseek-r1:32b": 1,
+    "deepseek-r1-distill-qwen-32b": 1,
+
+    "deepseek-r1:70b": 1.5,
+    "deepseek-r1-distill-llama-70b": 1.5,
+
     'deepseek-r1': 2,
     'deepseek-reasoner': 2,
-    'deepseek-think': 0.5,
-    "deepseek-search": 0.5,
-    "deepseek-chat-64k": 5,
-    "deepseek-v3-128k": 5,
+
+    'deepseek-r1-think': 1.5,
+    'deepseek-reasoner-think': 1.5,
+
+    "deepseek-search": 1,
+    'deepseek-r1-search': 2,
+    'deepseek-reasoner-search': 2,
 
     # Doubao
     "doubao-lite-128k": 0.4,
@@ -573,6 +611,7 @@ MODEL_RATIO = {
     "gemini-1.0-pro-vision-latest": 1,
     "gemini-exp-1206": 1,
 
+    "gemini-1.5-flash": 0.1,
     "gemini-1.5-flash-002": 0.3,  # redirected to openrouter
     "google/gemini-flash-1.5-8b": 0.3,  # openrouter $0.0375 $0.15
 
@@ -581,8 +620,12 @@ MODEL_RATIO = {
     "google/gemini-flash-1.5-exp": 0.1,  # free on openrouter
     "google/gemini-flash-1.5-8b-exp": 0.1,  # free on openrouter
 
-    "gemini-2.0-flash": 0.75,
-    "gemini-2.0-flash-001": 0.75,
+    "gemini-2.0-flash": 0.075,
+    "gemini-2.0-flash-001": 0.075,
+    "gemini-2.0-flash-lite-preview-02-05": 0.075,
+
+    "gemini-2.0-pro": 1.25,
+    "gemini-2.0-pro-exp-02-05": 1.25,
 
     "gemini-2.0-flash-exp": 0.5,
     "gemini-2.0-flash-thinking-exp": 1,
@@ -633,6 +676,8 @@ MODEL_RATIO = {
     "gpt-4o-2024-11-20": 1.25,
 
     "o1": 7.5,
+    "o1-2024-12-17": 7.5,
+
     "o1-mini": 1.5,
     "o1-preview": 7.5,
     "o1-mini-2024-09-12": 1.5,
@@ -685,7 +730,8 @@ MODEL_RATIO = {
     "Qwen/Qwen2-VL-72B-Instruct": 2,
 
     # temporary
-    "ep-20240515073409-dlpqp": 5
+    "ep-20240515073409-dlpqp": 5,
+    "microsoft/phi-4": 0.035 * 5,
 
 }
 
@@ -751,6 +797,7 @@ COMPLETION_RATIO = {
 
     "gemini-1.5-pro-001": 4,
     "gemini-1.5-pro-002": 4,
+    "gemini-1.5-flash": 4,
     "gemini-1.5-flash-002": 4,
 
     "gemini-exp-1206": 5,
@@ -763,6 +810,11 @@ COMPLETION_RATIO = {
     "gemini-2.0-flash-thinking-exp-1219": 5,
     "gemini-2.0-flash-thinking-exp-01-21": 5,
 
+    "gemini-2.0-flash-lite-preview-02-05": 4,
+
+    "gemini-2.0-pro": 4,
+    "gemini-2.0-pro-exp-02-05": 4,
+
     "hunyuan-a52b-instruct": 5,
     "qwen2.5-coder-32b-instruct": 3,
 
@@ -773,7 +825,7 @@ COMPLETION_RATIO = {
     "qvq-72b-preview": 3,
 
     "qwen-long": 4,
-    "qwen-max": 3,
+    "qwen-max": 4,
     "qwen-vl-max-latest": 3,
     "qwen-vl-plus-latest": 3,
     "qwen2-vl-7b-instruct": 5,
@@ -786,6 +838,7 @@ COMPLETION_RATIO = {
     "qwen2.5-32b-instruct": 4,
     "qwen2.5-72b-instruct": 4,
     "qwen2.5-math-72b-instruct": 4,
+    "qwen2.5-vl-72b-instruct": 3,
 
     "deepseek-vl2": 4,
     "deepseek-ai/deepseek-vl2": 4,
@@ -809,17 +862,39 @@ COMPLETION_RATIO = {
     "doubao-vision-lite-32k": 3,
     "doubao-vision-pro-32k": 3,
 
+    "deepseek-r1:1.5b": 4,
+    "deepseek-r1-distill-qwen-1.5b": 4,
+    "deepseek-r1:7b": 4,
+    "deepseek-r1-distill-qwen-7b": 4,
+    "deepseek-r1:8b": 4,
+    "deepseek-r1-distill-llama-8b": 4,
+    "deepseek-r1:14b": 4,
+    "deepseek-r1-distill-qwen-14b": 4,
+    "deepseek-r1:32b": 4,
+    "deepseek-r1-distill-qwen-32b": 4,
+    "deepseek-r1:70b": 4,
+    "deepseek-r1-distill-llama-70b": 4,
+
     "deepseek-v3": 4,
-    "deepseek-search": 1,
+    "deepseek-chat": 4,
     'deepseek-r1': 4,
     'deepseek-reasoner': 4,
-    "deepseek/deepseek-chat": 1,
-    "deepseek-ai/DeepSeek-V3": 1,
-    "accounts/fireworks/models/deepseek-v3": 1,
+    "deepseek-reasoner-164k": 8,
+
     "deepseek-chat:function": 4,
+
+    "deepseek-chat-8k": 5,
     "deepseek-chat-64k": 5,
+    "deepseek-chat-164k": 5,
+
     "deepseek-v3-128k": 5,
+
     "deepseek-llm-67b-chat": 4,
+    'deepseek-r1-think': 4,
+    'deepseek-reasoner-think': 4,
+    "deepseek-search": 5,
+    'deepseek-r1-search': 5,
+    'deepseek-reasoner-search': 5,
 
     "glm-zero": 5,
     "glm-zero-preview": 5,
@@ -992,7 +1067,9 @@ REDIRECT_MODEL = {
     # "qwen-2.5-72b": "qwen/qwen-2.5-72b",
     "tune-blob": "kaushikaakash04/tune-blob",
     "tune-mythomax-l2-13b": "rohan/tune-mythomax-l2-13b",
-    "tune-wizardlm-2-8x22b": "rohan/tune-wizardlm-2-8x22b"
+    "tune-wizardlm-2-8x22b": "rohan/tune-wizardlm-2-8x22b",
+
+    "microsoft/phi-4": 2,
 
 }
 
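MODEL_RATIO and COMPLETION_RATIO (changed above) follow the usual one-api convention: prompt tokens are charged at the model ratio times a base unit price, and completion tokens are additionally multiplied by the completion ratio. The sketch below is a hedged illustration under that assumption, with an assumed base price of $0.002 per 1K tokens at ratio 1; it is not the package's actual billing code.

# Hedged sketch of ratio-based billing in the one-api style; the real MeUtils /
# one-api accounting code may differ. BASE_PRICE_PER_1K_TOKENS is an assumption.
BASE_PRICE_PER_1K_TOKENS = 0.002

MODEL_RATIO = {"deepseek-r1": 2, "gemini-2.0-flash": 0.075}
COMPLETION_RATIO = {"deepseek-r1": 4, "gemini-2.0-flash": 4}

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    ratio = MODEL_RATIO.get(model, 1)
    completion_ratio = COMPLETION_RATIO.get(model, 1)
    prompt_cost = prompt_tokens / 1000 * BASE_PRICE_PER_1K_TOKENS * ratio
    completion_cost = completion_tokens / 1000 * BASE_PRICE_PER_1K_TOKENS * ratio * completion_ratio
    return prompt_cost + completion_cost

# deepseek-r1: ratio 2, completion ratio 4
print(estimate_cost("deepseek-r1", prompt_tokens=1000, completion_tokens=500))  # 0.012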
meutils/schemas/openai_types.py

@@ -74,13 +74,13 @@ class ChatCompletionChunk(_ChatCompletionChunk):
 
 
 chat_completion = ChatCompletion(
-    choices=[Choice(message=ChatCompletionMessage(content=""))]
+    choices=[Choice(message=ChatCompletionMessage(reasoning_content="", content=""))]
 )
 chat_completion_chunk = ChatCompletionChunk(
-    choices=[ChunkChoice(delta=ChoiceDelta(content=""))]
+    choices=[ChunkChoice(delta=ChoiceDelta(reasoning_content="", content=""))]
 )
 chat_completion_chunk_stop = ChatCompletionChunk(
-    choices=[ChunkChoice(delta=ChoiceDelta(content=""), finish_reason="stop")]
+    choices=[ChunkChoice(delta=ChoiceDelta(reasoning_content="", content=""), finish_reason="stop")]
 )
 
 
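The three template objects above now initialize reasoning_content="" alongside content="", so streamed deltas always expose both channels. Below is an illustrative sketch of a consumer that accumulates the two channels separately; plain dicts stand in for the chunk models, this is not the package's own code.

# Illustrative sketch only: `stream` stands in for chunks shaped like the templates above.
reasoning_parts: list[str] = []
answer_parts: list[str] = []

stream = [
    {"delta": {"reasoning_content": "Let me check the dates. ", "content": ""}},
    {"delta": {"reasoning_content": "", "content": "The release is from 2025-02-13."}},
]

for chunk in stream:
    delta = chunk["delta"]
    reasoning_parts.append(delta.get("reasoning_content") or "")
    answer_parts.append(delta.get("content") or "")

print("".join(reasoning_parts))  # thinking trace
print("".join(answer_parts))     # final answer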
@@ -352,15 +352,19 @@ if __name__ == '__main__':
     #
     #
     # print(A(n=11))
-    messages = [
-        {
-            "role": "user",
-            "content": [{'role': 'user', 'content': [{"type": "image_url", "image_url": "这是个图片链接"}]}]
-        },
-
-        # {'role': 'user', 'content': [{"type": "image_url", "image_url": {"url": "这是个图片链接"}}]},
-    ]
-
-    r = ChatCompletionRequest(model="gpt-3.5-turbo", messages=messages)
-    r.messages[-1]['content'] = [{"type": "image_url", "image_url": {"url": r.urls[-1]}}]
-    print(r)
+    # messages = [
+    #     {
+    #         "role": "user",
+    #         "content": [{'role': 'user', 'content': [{"type": "image_url", "image_url": "这是个图片链接"}]}]
+    #     },
+    #
+    #     # {'role': 'user', 'content': [{"type": "image_url", "image_url": {"url": "这是个图片链接"}}]},
+    # ]
+    #
+    # r = ChatCompletionRequest(model="gpt-3.5-turbo", messages=messages)
+    # r.messages[-1]['content'] = [{"type": "image_url", "image_url": {"url": r.urls[-1]}}]
+    # print(r)
+
+    print(chat_completion_chunk)
+    print(chat_completion)
+    print(chat_completion_chunk_stop)
meutils/schemas/vidu_types.py

@@ -10,7 +10,8 @@
 
 from meutils.pipe import *
 
-BASE_URL = "https://api.vidu.studio/vidu/v1"
+# BASE_URL = "https://api.vidu.studio/vidu/v1"
+BASE_URL = "https://service.vidu.com/vidu/v1"
 UPLOAD_BASE_URL = "https://api.vidu.studio/tools/v1"  # /files/uploads
 
 EXAMPLES = [
meutils/schemas/yuanbao_types.py

@@ -220,12 +220,17 @@ class SSEData(BaseModel):
     # data: [DONE]
 
     content: str = ""
+    reasoning_content: str = ""
+
     image: Optional[str] = None
 
     chunk: str = ""
 
     def __init__(self, **data):
         super().__init__(**data)
+
+        logger.debug(self.chunk)
+
         chunk = self.chunk.lstrip("data:")
 
         if '"type":"progress"' in chunk:
@@ -248,6 +253,9 @@ class SSEData(BaseModel):
             # df['title'] = "[" + df['title'] + "](" + df['url'] + ")"
             # df['image'] = "![](" + df['image'] + ")"
 
+        elif '{"type":"think"' in chunk:  # thinking...
+            content = self.reasoning_content = json.loads(chunk).get("content", "")
+
         else:
             content = ""
             # chunk.strip() or logger.debug(chunk)  # debug
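The new branch routes chunks tagged "type":"think" into reasoning_content instead of content. Below is a hedged, standalone sketch of that dispatch; the real SSEData model also handles progress and search chunks and logs every chunk, and the "text" type name used here is an assumption not confirmed by the diff.

# Hedged sketch; the actual class in meutils.schemas.yuanbao_types does more.
import json

def split_sse_chunk(raw: str) -> tuple[str, str]:
    """Return (reasoning_content, content) for a single SSE data line."""
    chunk = raw.lstrip("data:").strip()  # mirrors the lstrip("data:") used above
    if not chunk or chunk == "[DONE]":
        return "", ""
    try:
        payload = json.loads(chunk)
    except json.JSONDecodeError:
        return "", ""
    if payload.get("type") == "think":   # thinking tokens -> reasoning_content
        return payload.get("content", ""), ""
    if payload.get("type") == "text":    # assumed type name for answer tokens
        return "", payload.get("content", "")
    return "", ""

print(split_sse_chunk('data: {"type":"think","content":"reading the question"}'))
print(split_sse_chunk('data: {"type":"text","content":"Hello"}'))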
meutils/serving/fastapi/exceptions/http_error.py

@@ -42,6 +42,7 @@ async def general_exception_handler(request: Request, exc: Exception):
 
 
 async def chatfire_api_exception_handler(request: Request, exc: Exception):
+
     content = {
         "error":
             {
@@ -54,7 +55,6 @@ async def chatfire_api_exception_handler(request: Request, exc: Exception):
 
     # default value
     reps = None
-    request_json = {"body": await request.body()}
     if isinstance(exc, (HTTPStatusError, APIStatusError)):
         status_code = exc.response.status_code or 500
 
@@ -71,7 +71,7 @@ async def chatfire_api_exception_handler(request: Request, exc: Exception):
     if any(code in content_detail for code in {'451', }):
         content_detail = ""
 
-    send_message([request_json, content, content_detail])
+    send_message([content, content_detail])
 
     return reps or JSONResponse(
         content=content,
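The handler change above drops the raw request body from the alert forwarded via send_message, so only the error content and detail are reported. For context, here is a hedged sketch of how a catch-all handler of this shape is typically registered in FastAPI; the real meutils handler builds an OpenAI-style error payload and special-cases HTTPStatusError and APIStatusError.

# Hedged sketch only; the actual handler in meutils.serving.fastapi.exceptions.http_error
# is richer (status-code mapping, alerting via send_message).
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

app = FastAPI()

@app.exception_handler(Exception)
async def catch_all_exception_handler(request: Request, exc: Exception):
    content = {
        "error": {
            "message": str(exc),
            "type": exc.__class__.__name__,
        }
    }
    # Per the diff above, the raw request body is no longer attached to the alert;
    # only the error payload is reported.
    return JSONResponse(content=content, status_code=500)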