MeUtils 2025.8.21.9.53.54__py3-none-any.whl → 2025.8.26.9.9.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. examples/_openaisdk/openai_chatfire.py +3 -1
  2. examples/_openaisdk/openai_ffire.py +8 -10
  3. examples/_openaisdk/openai_images.py +11 -52
  4. examples/ann/x.py +115 -0
  5. meutils/apis/chatglm/glm_video_api.py +3 -3
  6. meutils/apis/chatglm/zai.py +7 -5
  7. meutils/apis/gitee/openai_images.py +20 -8
  8. meutils/apis/images/generations.py +26 -3
  9. meutils/apis/oneapi/channel.py +14 -33
  10. meutils/apis/oneapi/common.py +17 -6
  11. meutils/{llm/mappers.py → apis/volcengine_apis/doubao_images.py} +7 -4
  12. meutils/apis/volcengine_apis/images.py +4 -3
  13. meutils/data/VERSION +1 -1
  14. meutils/llm/check_utils.py +1 -0
  15. meutils/llm/models/__init__.py +1 -3
  16. meutils/llm/models/ele.py +35 -0
  17. meutils/llm/models/modelscope.py +66 -0
  18. meutils/llm/models/ppio.py +33 -0
  19. meutils/llm/models/siliconflow.py +12 -1
  20. meutils/llm/openai_utils/adapters.py +19 -4
  21. meutils/llm/openai_utils/billing_utils.py +4 -1
  22. meutils/schemas/image_types.py +1 -1
  23. meutils/schemas/oneapi/common.py +41 -12
  24. meutils/str_utils/__init__.py +1 -5
  25. meutils/str_utils/regular_expression.py +74 -0
  26. {meutils-2025.8.21.9.53.54.dist-info → meutils-2025.8.26.9.9.32.dist-info}/METADATA +262 -262
  27. {meutils-2025.8.21.9.53.54.dist-info → meutils-2025.8.26.9.9.32.dist-info}/RECORD +32 -28
  28. /meutils/llm/{models_mapping.py → models/models_mapping.py} +0 -0
  29. {meutils-2025.8.21.9.53.54.dist-info → meutils-2025.8.26.9.9.32.dist-info}/WHEEL +0 -0
  30. {meutils-2025.8.21.9.53.54.dist-info → meutils-2025.8.26.9.9.32.dist-info}/entry_points.txt +0 -0
  31. {meutils-2025.8.21.9.53.54.dist-info → meutils-2025.8.26.9.9.32.dist-info}/licenses/LICENSE +0 -0
  32. {meutils-2025.8.21.9.53.54.dist-info → meutils-2025.8.26.9.9.32.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,66 @@
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI.  @by PyCharm
# @File : modelscope
# @Time : 2025/8/22 22:47
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description : Alias -> ModelScope model-ID mapping.

from meutils.pipe import *

# Keys are the public-facing alias model names used by this project;
# values are the corresponding ModelScope repository IDs.
models_mapping = {
    "flux-kontext-dev": "MusePublic/FLUX.1-Kontext-Dev",
    # "flux-kontext-dev": "black-forest-labs/FLUX.1-Kontext-dev",

    "flux.1-krea-dev": "black-forest-labs/FLUX.1-Krea-dev",

    "moonshotai/kimi-k2-instruct": "moonshotai/Kimi-K2-Instruct",
    "kimi-k2-0711-preview": "moonshotai/Kimi-K2-Instruct",
    "majicflus_v1": "MAILAND/majicflus_v1",
    "deepseek-reasoner": "deepseek-ai/DeepSeek-R1-0528",

    "deepseek-r1": "deepseek-ai/DeepSeek-R1-0528",
    "deepseek-r1-0528": "deepseek-ai/DeepSeek-R1-0528",
    "deepseek-r1-250528": "deepseek-ai/DeepSeek-R1-0528",
    "deepseek-chat": "deepseek-ai/DeepSeek-V3",
    "deepseek-v3": "deepseek-ai/DeepSeek-V3",
    "deepseek-v3-0324": "deepseek-ai/DeepSeek-V3",
    "deepseek-v3-250324": "deepseek-ai/DeepSeek-V3",
    "deepseek-v3-1-250821": "deepseek-ai/DeepSeek-V3.1",

    "deepseek-r1-distill-qwen-14b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
    "deepseek-r1-distill-qwen-32b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    "deepseek-r1-distill-llama-70b": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
    "qwen2.5-coder-32b-instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
    "qwen2.5-coder-14b-instruct": "Qwen/Qwen2.5-Coder-14B-Instruct",
    "qwen2.5-coder-7b-instruct": "Qwen/Qwen2.5-Coder-7B-Instruct",
    "qwen2.5-72b-instruct": "Qwen/Qwen2.5-72B-Instruct",
    "qwen2.5-32b-instruct": "Qwen/Qwen2.5-32B-Instruct",
    "qwen2.5-14b-instruct": "Qwen/Qwen2.5-14B-Instruct",
    "qwen2.5-7b-instruct": "Qwen/Qwen2.5-7B-Instruct",
    "qwq-32b-preview": "Qwen/QwQ-32B-Preview",
    "qvq-72b-preview": "Qwen/QVQ-72B-Preview",
    "qwen2-vl-7b-instruct": "Qwen/Qwen2-VL-7B-Instruct",
    "qwen2.5-14b-instruct-1m": "Qwen/Qwen2.5-14B-Instruct-1M",
    "qwen2.5-7b-instruct-1m": "Qwen/Qwen2.5-7B-Instruct-1M",
    "qwen2.5-vl-3b-instruct": "Qwen/Qwen2.5-VL-3B-Instruct",
    "qwen2.5-vl-7b-instruct": "Qwen/Qwen2.5-VL-7B-Instruct",
    "qwen2.5-vl-72b-instruct": "Qwen/Qwen2.5-VL-72B-Instruct",
    "qwq-32b": "Qwen/QwQ-32B",
    "qwen2.5-vl-32b-instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
    "qwen3-0.6b": "Qwen/Qwen3-0.6B",
    "qwen3-1.7b": "Qwen/Qwen3-1.7B",
    "qwen3-4b": "Qwen/Qwen3-4B",
    "qwen3-14b": "Qwen/Qwen3-14B",
    "qwen3-30b-a3b": "Qwen/Qwen3-30B-A3B",
    "qwen3-32b": "Qwen/Qwen3-32B",
    "qwen3-235b-a22b": "Qwen/Qwen3-235B-A22B",
    "qwen3-coder-480b-a35b-instruct": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
    "qwen3-235b-a22b-instruct-2507": "Qwen/Qwen3-235B-A22B-Instruct-2507"

}

if __name__ == '__main__':
    # BUG FIX: was `','.join(models)` — `models` is undefined in this module
    # (the dict is named `models_mapping`), so running the script raised NameError.
    print(','.join(models_mapping))
@@ -0,0 +1,33 @@
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI.  @by PyCharm
# @File : ppio
# @Time : 2025/8/22 11:05
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description : Alias -> PPIO model-ID mapping.

from meutils.pipe import *

# Keys are the public-facing alias model names used by this project;
# values are the corresponding PPIO model identifiers.
models = {

    "glm-4.5": "zai-org/glm-4.5",

    "qwen3-235b-a22b-thinking-2507": "qwen/qwen3-235b-a22b-thinking-2507",
    "qwen3-235b-a22b-instruct-2507": "qwen/qwen3-235b-a22b-instruct-2507",
    "kimi-k2-0711-preview": "moonshotai/kimi-k2-instruct",
    "deepseek-v3.1": "deepseek/deepseek-v3.1",
    "deepseek-v3": "deepseek/deepseek-v3-turbo",
    "deepseek-v3-0324": "deepseek/deepseek-v3-0324",
    "deepseek-v3-250324": "deepseek/deepseek-v3-0324",

    # "deepseek/deepseek-v3/community"
    "deepseek-r1": "deepseek/deepseek-r1-turbo",
    "deepseek-reasoner": "deepseek/deepseek-r1-turbo",

    "deepseek-r1-250528": "deepseek/deepseek-r1-0528",

}

if __name__ == '__main__':
    # FIX: the print was at module level, so every `import ppio` wrote to stdout
    # as a side effect. Guarded under __main__ (matching the sibling
    # modelscope.py module) so the debug dump only runs when executed directly.
    print(','.join(models))
@@ -19,6 +19,10 @@ models_mapping = {
19
19
  "deepseek-v3-0324": "deepseek-ai/DeepSeek-V3",
20
20
  "deepseek-v3-250324": "deepseek-ai/DeepSeek-V3",
21
21
  "deepseek-chat": "deepseek-ai/DeepSeek-V3",
22
+ "deepseek-v3.1": "deepseek-ai/DeepSeek-V3.1",
23
+ "deepseek-v3-1-250821": "deepseek-ai/DeepSeek-V3.1",
24
+
25
+
22
26
  "qwen3-32b": "Qwen/Qwen3-32B",
23
27
  "deepseek-r1": "deepseek-ai/DeepSeek-R1",
24
28
  "deepseek-r1-250528": "deepseek-ai/DeepSeek-R1",
@@ -38,7 +42,14 @@ models_mapping = {
38
42
  "deepseek-r1-distill-qwen-7b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
39
43
  "deepseek-r1:8b": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
40
44
  "deepseek-r1-distill-llama-8b": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
41
- "qwen2.5-32b-instruct": "Qwen/Qwen2.5-32B-Instruct"
45
+ "qwen2.5-32b-instruct": "Qwen/Qwen2.5-32B-Instruct",
46
+
47
+ "flux": "black-forest-labs/FLUX.1-schnell",
48
+ "flux-schnell": "black-forest-labs/FLUX.1-schnell",
49
+ "flux-pro-max": "black-forest-labs/FLUX.1-dev",
50
+ "flux-dev": "black-forest-labs/FLUX.1-dev",
51
+ "flux-pro": "black-forest-labs/FLUX.1-dev",
52
+ "flux.1.1-pro": "black-forest-labs/FLUX.1-dev",
42
53
  }
43
54
 
44
55
 
@@ -16,6 +16,7 @@ from meutils.llm.openai_utils import create_chat_completion
16
16
  from meutils.schemas.openai_types import CompletionRequest, ChatCompletion
17
17
  from meutils.schemas.image_types import ImageRequest
18
18
  from meutils.llm.openai_utils import chat_completion, chat_completion_chunk, create_chat_completion_chunk
19
+ from meutils.str_utils import parse_url, parse_command_string
19
20
 
20
21
 
21
22
  async def stream_to_nostream(
@@ -25,11 +26,12 @@ async def stream_to_nostream(
25
26
 
26
27
 
27
28
  async def chat_for_image(
28
- generate: Callable,
29
+ generate: Optional[Callable],
29
30
  request: CompletionRequest,
30
31
  api_key: Optional[str] = None,
32
+ base_url: Optional[str] = None,
31
33
  ):
32
- generate = partial(generate, api_key=api_key)
34
+ generate = generate and partial(generate, api_key=api_key, base_url=base_url)
33
35
 
34
36
  if not request.stream or request.last_user_content.startswith( # 跳过nextchat
35
37
  (
@@ -66,7 +68,20 @@ async def chat_for_image(
66
68
  prompt=prompt,
67
69
  image=image
68
70
  )
71
+ if not request.image:
72
+ request.image, request.prompt = request.image_and_prompt
69
73
 
74
+ if '--' in request.prompt:
75
+ prompt_dict = parse_command_string(request.prompt)
76
+
77
+ data = {
78
+ **request.model_dump(exclude_none=True, exclude={"extra_fields", "aspect_ratio"}),
79
+ **prompt_dict
80
+ }
81
+ request = ImageRequest(**data)
82
+ logger.debug(request)
83
+
84
+ if not generate: return
70
85
  future_task = asyncio.create_task(generate(request)) # 异步执行
71
86
 
72
87
  async def gen():
@@ -125,8 +140,8 @@ if __name__ == '__main__':
125
140
  request = CompletionRequest(
126
141
  model="deepseek-r1-Distill-Qwen-1.5B",
127
142
  messages=[
128
- {"role": "user", "content": "``hi"}
143
+ {"role": "user", "content": "``hi --a 1"}
129
144
  ],
130
- stream=False,
145
+ stream=True,
131
146
  )
132
147
  arun(chat_for_image(None, request))
@@ -263,7 +263,10 @@ if __name__ == '__main__':
263
263
  model = "Wan-AI/Wan2.1-T2V-14B"
264
264
 
265
265
  # arun(billing_for_async_task(model, task_id=task_id, n=3))
266
- arun(billing_for_async_task(task_id='fal-ai-sync'))
266
+ # arun(billing_for_async_task(task_id='fal-ai-sync'))
267
+ arun(billing_for_async_task())
268
+
269
+
267
270
 
268
271
  # data = {
269
272
  # "model": "doubao-seedance-1-0-pro-250528",
@@ -9,7 +9,7 @@
9
9
  # @Description : todo: 通用比例适配
10
10
 
11
11
  from meutils.pipe import *
12
- from meutils.str_utils import parse_url
12
+ from meutils.str_utils import parse_url, parse_command_string
13
13
  from meutils.math_utils import size2aspect_ratio
14
14
 
15
15
  from pydantic import constr
@@ -25,6 +25,9 @@ MINIMAX_VIDEO = 2 * 0.6
25
25
  FAL = 3
26
26
  FAL_ = 5
27
27
  FAL_MODELS = {
28
+ #
29
+ "fal-ai/clarity-upscaler": 1, # Your request will cost $0.03 per upscaled megapixel.
30
+
28
31
  'fal-kling-video-lipsync-audio-to-video': 0.5,
29
32
 
30
33
  'fal-pixverse-v4.5-effects': 1,
@@ -108,6 +111,7 @@ MODEL_PRICE = {
108
111
  **FAL_MODELS,
109
112
 
110
113
  "qwen-image": 0.05,
114
+ "qwen-image-edit": 0.05,
111
115
 
112
116
  "wan-ai-wan2.1-t2v-14b": 1,
113
117
  "wan-ai-wan2.1-t2v-14b-turbo": 1,
@@ -222,10 +226,6 @@ MODEL_PRICE = {
222
226
  "qwen3-reranker-4b": 0.0011,
223
227
  "qwen3-reranker-8b": 0.0011,
224
228
 
225
- "qwen3-embedding-0.6b": 0.0011,
226
- "qwen3-embedding-4b": 0.0011,
227
- "qwen3-embedding-8b": 0.0011,
228
-
229
229
  # 视频
230
230
  "api-videos-3d": 0.01,
231
231
  "api-videos-3d-1.5": 0.01,
@@ -544,8 +544,18 @@ MODEL_RATIO = {
544
544
  "elevenlabs/scribe_v1": 3 * 0.03 * 1000 / 60 / 2, # Your request will cost $0.03 per minute of audio transcribed
545
545
  "elevenlabs/eleven_multilingual_v2": 3 * 0.1 * 1000 / 2,
546
546
  "elevenlabs/eleven_turbo_v2_5": 3 * 0.05 * 1000 / 2, # Your request will cost $0.05 per thousand characters.
547
+ "elevenlabs/eleven_v3": 3 * 0.05 * 1000 / 2, # Your request will cost $0.05 per thousand characters.
548
+
549
+ 'elevenlabs/eleven_flash_v2_5': 3 * 0.05 * 1000 / 2,
550
+ 'elevenlabs/eleven_turbo_v2': 3 * 0.05 * 1000 / 2,
551
+ 'elevenlabs/eleven_flash_v2': 3 * 0.05 * 1000 / 2,
552
+ 'elevenlabs/eleven_monolingual_v1': 3 * 0.05 * 1000 / 2,
553
+ 'elevenlabs/eleven_english_sts_v2': 3 * 0.05 * 1000 / 2,
554
+ 'elevenlabs/eleven_multilingual_sts_v2': 3 * 0.05 * 1000 / 2,
555
+ 'elevenlabs/eleven_multilingual_v1': 3 * 0.05 * 1000 / 2,
547
556
 
548
557
  "fal-elevenlabs-speech-to-text": 3 * 0.03 * 1000 / 60 / 2,
558
+ "fal-elevenlabs-tts-eleven-v3": 3 * 0.03 * 1000 / 60 / 2,
549
559
  'fal-elevenlabs-tts-turbo-v2.5': 3 * 0.05 * 1000 / 2,
550
560
  'fal-elevenlabs-tts-multilingual-v2': 3 * 0.1 * 1000 / 2,
551
561
 
@@ -593,6 +603,14 @@ MODEL_RATIO = {
593
603
  "jina-embeddings-v4": 0.1,
594
604
  "jina-reranker-m0": 0.1,
595
605
 
606
+ "qwen3-embedding-0.6b": 0.1,
607
+ "qwen3-embedding-4b": 0.1,
608
+ "qwen3-embedding-8b": 0.1,
609
+
610
+ "doubao-embedding-vision-250615": 0.9,
611
+ "doubao-embedding-large-text-250515": 0.25,
612
+ "doubao-embedding-text-240715": 0.35,
613
+
596
614
  # 百川
597
615
  'baichuan4-turbo': 7.5,
598
616
  'baichuan4-air': 0.49,
@@ -668,10 +686,10 @@ MODEL_RATIO = {
668
686
  "glm-4.1v-thinking-flashx": 1,
669
687
 
670
688
  "glm-4.5-flash": 0.1,
671
- "glm-4.5-air": 0.2,
689
+ "glm-4.5-air": 0.4,
672
690
  "glm-4.5-airx": 1,
673
- "glm-4.5": 1,
674
- "glm-4.5-x": 2,
691
+ "glm-4.5": 2,
692
+ "glm-4.5-x": 6,
675
693
  "glm-4.5v": 1,
676
694
 
677
695
  # 阿里千问 https://dashscope.console.aliyun.com/billing
@@ -712,8 +730,8 @@ MODEL_RATIO = {
712
730
  "qwen3-235b-a22b": 1,
713
731
  "qwen-math-plus": 2,
714
732
  "qwen3-coder-480b-a35b-instruct": 3,
715
- "qwen3-235b-a22b-instruct-2507": 1,
716
- "qwen3-235b-a22b-thinking-2507": 3,
733
+ "qwen3-235b-a22b-instruct-2507": 2,
734
+ "qwen3-235b-a22b-thinking-2507": 2,
717
735
 
718
736
  "qwen3-coder-plus": 2,
719
737
  "qwen3-coder-plus-2025-07-22": 2,
@@ -802,6 +820,9 @@ MODEL_RATIO = {
802
820
  "deepseek-v3-8k": 0.5,
803
821
  "deepseek-v3-128k": 5,
804
822
  "deepseek-chat": 1,
823
+ "deepseek-v3.1": 2,
824
+ "deepseek-v3-1-250821": 2,
825
+ "deepseek-v3-1-think": 2,
805
826
 
806
827
  "deepseek-chat-8k": 0.5,
807
828
  "deepseek-chat-64k": 5,
@@ -858,6 +879,7 @@ MODEL_RATIO = {
858
879
  "doubao-seed-1-6-250615": 0.4,
859
880
  "doubao-seed-1-6-thinking-250615": 0.4,
860
881
  "doubao-seed-1-6-thinking-250715": 0.4,
882
+ "doubao-seed-1-6-vision-250815": 0.6,
861
883
 
862
884
  "doubao-1-5-ui-tars-250428": 1.75,
863
885
  "ui-tars-72b": 1.75,
@@ -878,6 +900,8 @@ MODEL_RATIO = {
878
900
  "doubao-pro-4k": 0.4,
879
901
  "doubao-pro-32k": 0.4,
880
902
  "doubao-pro-32k-character": 0.4,
903
+ "doubao-pro-32k-character-241215": 0.4,
904
+
881
905
  "doubao-pro-128k": 2.5,
882
906
  "doubao-pro-256k": 2.5,
883
907
  "doubao-1.5-pro-32k": 0.4,
@@ -1462,7 +1486,7 @@ COMPLETION_RATIO = {
1462
1486
  "qwen3-235b-a22b": 4,
1463
1487
  "qwenlong-l1-32b": 4,
1464
1488
  "qwen3-235b-a22b-instruct-2507": 4,
1465
- "qwen3-235b-a22b-thinking-2507": 4,
1489
+ "qwen3-235b-a22b-thinking-2507": 10,
1466
1490
  "qwen3-coder-480b-a35b-instruct": 4,
1467
1491
 
1468
1492
  "qwen3-coder-plus": 4,
@@ -1477,6 +1501,7 @@ COMPLETION_RATIO = {
1477
1501
  "doubao-seed-1-6-250615": 10,
1478
1502
  "doubao-seed-1-6-thinking-250615": 10,
1479
1503
  "doubao-seed-1-6-thinking-250715": 10,
1504
+ "doubao-seed-1-6-vision-250815": 13.33,
1480
1505
 
1481
1506
  "doubao-1-5-ui-tars-250428": 3.43,
1482
1507
  "ui-tars-72b": 4,
@@ -1491,6 +1516,7 @@ COMPLETION_RATIO = {
1491
1516
  "doubao-pro-4k": 3,
1492
1517
  "doubao-pro-32k": 2.5,
1493
1518
  "doubao-pro-32k-character": 3,
1519
+ "doubao-pro-32k-character-241215": 3,
1494
1520
  "doubao-pro-128k": 3,
1495
1521
  "doubao-pro-256k": 1.8,
1496
1522
  "doubao-1.5-pro-32k": 2.5,
@@ -1548,6 +1574,9 @@ COMPLETION_RATIO = {
1548
1574
  "deepseek-v3-250324": 4,
1549
1575
  "deepseek-chat": 4,
1550
1576
  "deepseek-v3-fast": 4,
1577
+ "deepseek-v3.1": 3,
1578
+ "deepseek-v3-1-250821": 3,
1579
+ "deepseek-v3-1-think": 3,
1551
1580
 
1552
1581
  'deepseek-r1': 4,
1553
1582
  "deepseek-r1-160k": 5,
@@ -1588,8 +1617,8 @@ COMPLETION_RATIO = {
1588
1617
  "glm-4.1v-thinking-flashx": 4,
1589
1618
 
1590
1619
  "glm-4.5-flash": 3,
1591
- "glm-4.5-air": 3,
1592
- "glm-4.5-airx": 3,
1620
+ "glm-4.5-air": 7.5,
1621
+ "glm-4.5-airx": 4,
1593
1622
  "glm-4.5": 4,
1594
1623
  "glm-4.5-x": 4,
1595
1624
  "glm-4.5v": 3,
@@ -7,14 +7,10 @@
7
7
  # @WeChat : meutils
8
8
  # @Software : PyCharm
9
9
  # @Description :
10
- import re
11
10
 
12
- import httpx
13
11
 
14
12
  from meutils.pipe import *
15
- # from meutils.str_utils.translater import translater
16
- from meutils.str_utils.regular_expression import parse_url
17
- from meutils.caches import cache
13
+ from meutils.str_utils.regular_expression import parse_url, parse_command_string
18
14
  from meutils.request_utils.crawler import Crawler
19
15
  from urllib.parse import urlencode, parse_qs, parse_qsl, quote_plus, unquote_plus, urljoin
20
16
 
@@ -114,6 +114,80 @@ def parse_url(text: str, for_image=False, fn: Optional[Callable] = None):
114
114
  def parse_url_from_json():
115
115
  pass
116
116
 
117
+ def parse_command_string(command_str: str) -> dict:
118
+ """
119
+ 解析一个类似 "prompt --key1 value1 --key2 value2" 格式的字符串。
120
+
121
+ Args:
122
+ command_str: 输入的命令行字符串。
123
+
124
+ Returns:
125
+ 一个包含 prompt 和解析后参数的字典。
126
+ 例如: {"prompt": "画条狗", "size": "1:1", "n": 10}
127
+ """
128
+ # 初始化结果字典
129
+ result = {}
130
+
131
+ # 使用正则表达式找到第一个参数 '--' 的位置
132
+ # 这比简单的 split 更健壮,可以处理 prompt 中包含 '--' 的情况(虽然不常见)
133
+ match = re.search(r'\s--\w', command_str)
134
+
135
+ if not match:
136
+ # 如果没有找到任何参数,整个字符串都是 prompt
137
+ result['prompt'] = command_str.strip()
138
+ return result
139
+
140
+ first_arg_index = match.start()
141
+
142
+ # 提取 prompt 和参数部分
143
+ prompt = command_str[:first_arg_index].strip()
144
+ args_str = command_str[first_arg_index:].strip()
145
+
146
+ result['prompt'] = prompt
147
+
148
+ # 将参数字符串按空格分割成列表
149
+ # 例如 "--size 1:1 --n 10" -> ['--size', '1:1', '--n', '10']
150
+ args_list = args_str.split()
151
+
152
+ # 遍历参数列表,每次处理一个键值对
153
+ i = 0
154
+ while i < len(args_list):
155
+ arg = args_list[i]
156
+
157
+ # 确认当前项是一个参数键(以 '--' 开头)
158
+ if arg.startswith('--'):
159
+ key = arg[2:] # 去掉 '--' 前缀
160
+
161
+ # 检查后面是否跟着一个值
162
+ if i + 1 < len(args_list) and not args_list[i + 1].startswith('--'):
163
+ value = args_list[i + 1]
164
+
165
+ # 尝试将值转换为整数,如果失败则保留为字符串
166
+ try:
167
+ processed_value = int(value)
168
+ except ValueError:
169
+ processed_value = value
170
+
171
+ # 布尔型
172
+ if processed_value in ['true', 'yes', 'on']:
173
+ processed_value = True
174
+ elif processed_value in ['false', 'no', 'off']:
175
+ processed_value = False
176
+
177
+ result[key] = processed_value
178
+
179
+ i += 2 # 跳过键和值,移动到下一个参数
180
+ else:
181
+ # 处理没有值的参数,例如 --test,可以设为 True 或忽略
182
+ result[key] = True # 或者可以写 pass 直接忽略
183
+ i += 1
184
+ else:
185
+ # 如果某一项不是以 '--' 开头,它可能是格式错误,直接跳过
186
+ i += 1
187
+
188
+ return result
189
+
190
+
117
191
 
118
192
  if __name__ == '__main__':
119
193
  # from urllib.parse import urlparse