MeUtils 2025.3.4.15.59.15__py3-none-any.whl → 2025.3.5.19.55.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. {MeUtils-2025.3.4.15.59.15.dist-info → MeUtils-2025.3.5.19.55.22.dist-info}/METADATA +264 -264
  2. {MeUtils-2025.3.4.15.59.15.dist-info → MeUtils-2025.3.5.19.55.22.dist-info}/RECORD +56 -30
  3. examples/_openaisdk/open_router.py +2 -1
  4. examples/_openaisdk/openai_files.py +16 -5
  5. examples/_openaisdk/openai_moon.py +22 -19
  6. meutils/apis/baidu/bdaitpzs.py +1 -0
  7. meutils/apis/jimeng/common.py +1 -1
  8. meutils/apis/oneapi/common.py +4 -4
  9. meutils/apis/proxy/ips.py +2 -0
  10. meutils/caches/common.py +4 -0
  11. meutils/data/VERSION +1 -1
  12. meutils/data/oneapi/NOTICE.html +12 -0
  13. meutils/data/oneapi/index.html +275 -0
  14. meutils/io/_openai_files.py +31 -0
  15. meutils/io/openai_files.py +138 -0
  16. meutils/io/parsers/__init__.py +10 -0
  17. meutils/io/parsers/fileparser/PDF抽取.py +58 -0
  18. meutils/io/parsers/fileparser/__init__.py +11 -0
  19. meutils/io/parsers/fileparser/common.py +91 -0
  20. meutils/io/parsers/fileparser/demo.py +41 -0
  21. meutils/io/parsers/fileparser/filetype/__init__.py +10 -0
  22. meutils/io/parsers/fileparser/filetype/__main__.py +37 -0
  23. meutils/io/parsers/fileparser/filetype/filetype.py +98 -0
  24. meutils/io/parsers/fileparser/filetype/helpers.py +140 -0
  25. meutils/io/parsers/fileparser/filetype/match.py +155 -0
  26. meutils/io/parsers/fileparser/filetype/types/__init__.py +118 -0
  27. meutils/io/parsers/fileparser/filetype/types/application.py +22 -0
  28. meutils/io/parsers/fileparser/filetype/types/archive.py +687 -0
  29. meutils/io/parsers/fileparser/filetype/types/audio.py +212 -0
  30. meutils/io/parsers/fileparser/filetype/types/base.py +29 -0
  31. meutils/io/parsers/fileparser/filetype/types/document.py +256 -0
  32. meutils/io/parsers/fileparser/filetype/types/font.py +115 -0
  33. meutils/io/parsers/fileparser/filetype/types/image.py +383 -0
  34. meutils/io/parsers/fileparser/filetype/types/isobmff.py +33 -0
  35. meutils/io/parsers/fileparser/filetype/types/video.py +223 -0
  36. meutils/io/parsers/fileparser/filetype/utils.py +84 -0
  37. meutils/io/parsers/fileparser/filetype.py +41 -0
  38. meutils/io/parsers/fileparser/mineru.py +48 -0
  39. meutils/io/parsers/fileparser/pdf.py +30 -0
  40. meutils/io/parsers/fileparser/表格抽取.py +118 -0
  41. meutils/llm/check_utils.py +33 -2
  42. meutils/llm/clients.py +1 -0
  43. meutils/llm/completions/chat_gemini.py +72 -0
  44. meutils/llm/completions/{chat_all.py → chat_plus.py} +32 -8
  45. meutils/llm/completions/{agents/file.py → chat_spark.py} +46 -26
  46. meutils/llm/completions/qwenllm.py +57 -16
  47. meutils/llm/completions/yuanbao.py +29 -3
  48. meutils/llm/openai_utils/common.py +2 -2
  49. meutils/schemas/oneapi/common.py +22 -20
  50. meutils/schemas/openai_types.py +56 -30
  51. meutils/schemas/yuanbao_types.py +6 -7
  52. meutils/types.py +2 -0
  53. meutils/data/oneapi/_NOTICE.html +0 -278
  54. meutils/data/oneapi/_NOTICE.md +0 -140
  55. meutils/llm/completions/gemini.py +0 -69
  56. {MeUtils-2025.3.4.15.59.15.dist-info → MeUtils-2025.3.5.19.55.22.dist-info}/LICENSE +0 -0
  57. {MeUtils-2025.3.4.15.59.15.dist-info → MeUtils-2025.3.5.19.55.22.dist-info}/WHEEL +0 -0
  58. {MeUtils-2025.3.4.15.59.15.dist-info → MeUtils-2025.3.5.19.55.22.dist-info}/entry_points.txt +0 -0
  59. {MeUtils-2025.3.4.15.59.15.dist-info → MeUtils-2025.3.5.19.55.22.dist-info}/top_level.txt +0 -0
meutils/llm/completions/qwenllm.py CHANGED
@@ -17,18 +17,19 @@ AttributeError: 'str' object has no attribute 'choices'
 from openai import AsyncOpenAI
 
 from meutils.pipe import *
-from meutils.io.files_utils import to_bytes
 from meutils.decorators.retry import retrying
+from meutils.io.files_utils import to_bytes, guess_mime_type
+from meutils.caches import rcache
 
 from meutils.llm.clients import qwen_client
 from meutils.llm.openai_utils import to_openai_params
 
 from meutils.config_utils.lark_utils import get_next_token_for_polling
-from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, ChatCompletionRequest, CompletionUsage
+from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, CompletionRequest, CompletionUsage
 
 FEISHU_URL = "https://xchatllm.feishu.cn/sheets/Bmjtst2f6hfMqFttbhLcdfRJnNf?sheet=PP1PGr"
 
-base_url = "https://chat.qwenlm.ai/api"
+base_url = "https://chat.qwen.ai/api"
 
 from fake_useragent import UserAgent
 
@@ -36,14 +37,18 @@ ua = UserAgent()
 
 
 @retrying()
-async def to_file(file, mime_type: str = "application/octet-stream"):
-    file_bytes = await to_bytes(file)
-    file = ("filename", file_bytes, mime_type)
+@rcache(ttl=3600, serializer='pickle')
+async def to_file(file):
+    filename = Path(file).name if isinstance(file, str) else 'untitled'
+    mime_type = guess_mime_type(file)
+    file_bytes: bytes = await to_bytes(file)
+    file = (filename, file_bytes, mime_type)
     file_object = await qwen_client.files.create(file=file, purpose="file-extract")
+    logger.debug(file_object)
     return file_object
 
 
-async def create(request: ChatCompletionRequest, token: Optional[str] = None): # ChatCompletionRequest 重构
+async def create(request: CompletionRequest, token: Optional[str] = None): # ChatCompletionRequest 重构
 
     token = token or await get_next_token_for_polling(feishu_url=FEISHU_URL)
 
@@ -54,11 +59,12 @@ async def create(request: ChatCompletionRequest, token: Optional[str] = None):
     )
 
     # qwen结构
-    if any(i in request.model.lower() for i in ("search",)):
+    model = request.model.lower()
+    if any(i in model for i in ("search",)):
         request.model = "qwen-max-latest"
         request.messages[-1]['chat_type'] = "search"
 
-    if any(i in request.model.lower() for i in ("qwq", "think")):
+    if any(i in model for i in ("qwq", "think")): # qwq-max-search
         request.model = "qwen-max-latest"
         request.messages[-1]['feature_config'] = {"thinking_enabled": True}
 
@@ -67,6 +73,7 @@ async def create(request: ChatCompletionRequest, token: Optional[str] = None):
     # # await to_file
     last_message = request.messages[-1]
     logger.debug(last_message)
+
     if last_message.get("role") == "user":
         user_content = last_message.get("content")
         if isinstance(user_content, list):
@@ -79,10 +86,26 @@ async def create(request: ChatCompletionRequest, token: Optional[str] = None):
 
 
                 elif content.get("type") == 'image_url':
                     url = content.get(content.get("type")).get("url")
-                    file_object = await to_file(url, "image/png")
+                    file_object = await to_file(url)
 
                     user_content[i] = {"type": "image", "image": file_object.id}
 
+        elif user_content.startswith("http"):
+            file_url, user_content = user_content.split(maxsplit=1)
+
+            user_content = [{"type": "text", "text": user_content}]
+
+            file_object = await to_file(file_url)
+
+            content_type = file_object.meta.get("content_type", "")
+            if content_type.startswith("image"):
+                user_content.append({"type": "image", "image": file_object.id})
+            else:
+                user_content.append({"type": "file", "file": file_object.id})
+
+            request.messages[-1]['content'] = user_content
+
+    logger.debug(request)
     data = to_openai_params(request)
     if request.stream:
         _chunk = ""
@@ -133,17 +156,19 @@ if __name__ == '__main__':
         {
             "type": "file_url",
             "file_url": {
-                "url": "https://oss.ffire.cc/files/%E6%8B%9B%E6%A0%87%E6%96%87%E4%BB%B6%E5%A4%87%E6%A1%88%E8%A1%A8%EF%BC%88%E7%AC%AC%E4%BA%8C%E6%AC%A1%EF%BC%89.pdf"
+                "url": "https://oss.ffire.cc/files/AIGC.pdf"
             }
         }
 
     ]
 
-    request = ChatCompletionRequest(
+    request = CompletionRequest(
         # model="qwen-turbo-2024-11-01",
-        # model="qwen-max-latest",
+        model="qwen-max-latest",
         # model="qwen-max-latest-search",
-        model="qwq-max",
+        # model="qwq-max",
+        # model="qwq-max-search",
+
         # model="qwen2.5-vl-72b-instruct",
 
         # model="qwen-plus-latest",
@@ -152,8 +177,8 @@ if __name__ == '__main__':
         {
            'role': 'user',
            # 'content': '今天南京天气',
-           'content': "9.8 9.11哪个大",
-           # 'content': '总结下',
+           # 'content': "9.8 9.11哪个大",
+           'content': 'https://oss.ffire.cc/files/AIGC.pdf 总结下',
 
            # "chat_type": "search",
 
@@ -174,6 +199,22 @@ if __name__ == '__main__':
            # }
            # ]
 
+           # "content": [
+           #     {
+           #         "type": "text",
+           #         "text": "总结下",
+           #         "chat_type": "t2t",
+           #         "feature_config": {
+           #             "thinking_enabled": False
+           #         }
+           #     },
+           #     {
+           #         "type": "file_url",
+           #         "file_url": {
+           #             "url": 'xxxxxxx'
+           #         }
+           #     }
+           # ]
            # "content": [
            #     {
            #         "type": "text",
meutils/llm/completions/yuanbao.py CHANGED
@@ -9,13 +9,14 @@
 # @Description :
 import asyncio
 
+import pandas as pd
 from aiostream import stream
 
 from meutils.pipe import *
 from meutils.io.image import image2nowatermark_image
 
 from meutils.llm.utils import oneturn2multiturn
-from meutils.schemas.openai_types import ChatCompletionRequest
+from meutils.schemas.openai_types import CompletionRequest
 from meutils.schemas.image_types import HunyuanImageRequest
 
 from meutils.schemas.yuanbao_types import FEISHU_URL, SSEData, YUANBAO_BASE_URL, API_CHAT, API_GENERATE_ID, \
@@ -41,7 +42,7 @@ class Completions(object):
 
     async def create(
             self,
-            request: Optional[ChatCompletionRequest] = None,
+            request: Optional[CompletionRequest] = None,
             image_request: Optional[HunyuanImageRequest] = None,
             token: Optional[str] = None
     ):
@@ -111,6 +112,7 @@ class Completions(object):
             logger.debug(response.status_code)
             response.raise_for_status()
 
+            reasoning = "<think>\n" # </think>
             async for chunk in response.aiter_lines():
                 sse = SSEData(chunk=chunk)
                 if image_request and sse.image:
@@ -118,6 +120,24 @@ class Completions(object):
                     yield sse.image
 
                 if request:
+                    if sse.reasoning_content:
+                        yield reasoning
+                        yield sse.reasoning_content
+                        reasoning = ""
+                    elif sse.content and reasoning == "":
+                        reasoning = "\n</think>"
+                        yield reasoning
+
+                    if sse.search_content:
+                        df = pd.DataFrame(sse.search_content)
+                        df['title'] = "[" + df['title'] + "](" + df['url'] + ")"
+                        df['sourceName'] = "![" + df['sourceName'] + "](" + df['icon_url'] + ")"
+                        df = df[['title', 'web_site_name', 'sourceName', "publish_time"]]
+                        df.index += 1
+                        yield '> Search\n'
+                        yield df.to_markdown()
+                        yield '\n\n'
+
                     yield sse.content
 
     def generate_id(self, random: bool = True):
@@ -166,9 +186,15 @@ if __name__ == '__main__':
 
     # async2sync_generator(Completions(api_key).achat('画条狗')) | xprint
     # request = HunyuanImageRequest(prompt='画条狗', size='16:9')
+    # deep_seek deep_seek_v3 hunyuan_t1 hunyuan_gpt_175B_0404
+    # model = 'deep_seek_v3-search'
+    model = 'deep_seek-search'
 
     arun(Completions().create(
-        ChatCompletionRequest(messages=[{'role': 'user', 'content': '南京天气如何'}]),
+        CompletionRequest(
+            model=model,
+            messages=[{'role': 'user', 'content': '南京天气如何'}]
+        ),
        # image_request=request,
        # token=token
    ))
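Illustration (not part of the diff): the search-result rendering added above, shown standalone. The column names come from the diff; the sample doc below is invented, and df.to_markdown() requires the optional tabulate dependency.

import pandas as pd

# Invented sample doc with the fields the new yuanbao.py code expects.
docs = [{
    "title": "南京天气", "url": "https://example.com/njweather",
    "sourceName": "示例来源", "icon_url": "https://example.com/icon.png",
    "web_site_name": "example.com", "publish_time": "2025-03-05",
}]

df = pd.DataFrame(docs)
df['title'] = "[" + df['title'] + "](" + df['url'] + ")"                  # linked titles
df['sourceName'] = "![" + df['sourceName'] + "](" + df['icon_url'] + ")"  # icon as markdown image
df = df[['title', 'web_site_name', 'sourceName', 'publish_time']]
df.index += 1  # 1-based row numbers in the rendered table

print('> Search')
print(df.to_markdown())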
meutils/llm/openai_utils/common.py CHANGED
@@ -21,7 +21,7 @@ from meutils.apis.oneapi.user import get_user_money, get_api_key_log
 from meutils.apis.oneapi.token import get_api_key_money
 
 from meutils.schemas.oneapi import MODEL_PRICE
-from meutils.schemas.openai_types import ChatCompletionRequest, TTSRequest, STTRequest
+from meutils.schemas.openai_types import CompletionRequest, ChatCompletionRequest, TTSRequest, STTRequest
 from meutils.schemas.openai_types import ChatCompletion, ChatCompletionChunk, CompletionUsage
 from meutils.schemas.openai_types import chat_completion, chat_completion_chunk, chat_completion_chunk_stop # todo
 
@@ -37,7 +37,7 @@ AUDIO_TRANSCRIPTIONS_PARAMS = get_function_params(fn=OpenAI(api_key='').audio.tr
 
 
 def to_openai_params(
-        request: Union[dict, ChatCompletionRequest, ImageRequest, TTSRequest, STTRequest],
+        request: Union[dict, CompletionRequest, ChatCompletionRequest, ImageRequest, TTSRequest, STTRequest],
         redirect_model: Optional[str] = None,
 ) -> dict:
     data = copy.deepcopy(request)
meutils/schemas/oneapi/common.py CHANGED
@@ -280,28 +280,28 @@ MODEL_PRICE = {
     # MJ
     "mj-chat": 0.3,
 
-    "mj_fast_blend": 0.08,
+    "mj_fast_blend": 0.1,
     "mj_fast_custom_oom": 0,
-    "mj_fast_describe": 0.04,
-    "mj_fast_high_variation": 0.08,
-    "mj_fast_imagine": 0.08,
+    "mj_fast_describe": 0.05,
+    "mj_fast_high_variation": 0.1,
+    "mj_fast_imagine": 0.1,
     "mj_fast_inpaint": 0,
-    "mj_fast_low_variation": 0.08,
-    "mj_fast_modal": 0.08,
-    "mj_fast_pan": 0.08,
+    "mj_fast_low_variation": 0.1,
+    "mj_fast_modal": 0.1,
+    "mj_fast_pan": 0.1,
     "mj_fast_pic_reader": 0,
     "mj_fast_prompt_analyzer": 0,
     "mj_fast_prompt_analyzer_extended": 0,
-    "mj_fast_reroll": 0.08,
-    "mj_fast_shorten": 0.08,
-    "mj_fast_upload": 0.01,
-    "mj_fast_upscale": 0.04,
-    "mj_fast_upscale_creative": 0.08,
-    "mj_fast_upscale_subtle": 0.08,
-    "mj_fast_variation": 0.08,
-    "mj_fast_zoom": 0.08,
+    "mj_fast_reroll": 0.1,
+    "mj_fast_shorten": 0.1,
+    "mj_fast_upload": 0.1,
+    "mj_fast_upscale": 0.05,
+    "mj_fast_upscale_creative": 0.1,
+    "mj_fast_upscale_subtle": 0.1,
+    "mj_fast_variation": 0.1,
+    "mj_fast_zoom": 0.1,
 
-    "mj_relax_imagine": 0.08 * 0.5,
+    "mj_relax_imagine": 0.05 * MJ_RELAX,
 
     "mj_relax_blend": 0.08,
     "mj_relax_custom_oom": 0,
@@ -401,12 +401,13 @@ MODEL_RATIO = {
     # 阿里千问 https://dashscope.console.aliyun.com/billing
     "qwen-long": 0.25,
     "qwen-turbo": 0.05,
-    "qwen-plus": 2,
+    "qwen-plus": 0.8,
     "qwen-max": 1.2,
     "qwen-max-longcontext": 20,
     "qwen-turbo-2024-11-01": 0.15,
     "qwen-max-latest": 1.2,
     "qwen2.5-max": 1.2,
+    "qwen-max-2025-01-25": 1.2,
 
     "qwen-vl-max-latest": 1.5,
     "qwen-vl-plus-latest": 0.75,
@@ -421,7 +422,6 @@ MODEL_RATIO = {
     "qwen/qwq-32b-preview": 1,
     "Qwen/QwQ-32B-Preview": 1,
 
-
     "qwq-max": 1.2,
     "qwq-max-search": 1.2,
     "qwen-max-search": 1.2,
@@ -771,7 +771,7 @@ COMPLETION_RATIO = {
     "gpt-4-all": 4,
     "gpt-4-gizmo-*": 4,
     "gpt-4o-all": 4,
-    "gpt-4.5-preview-2025-02-27":2,
+    "gpt-4.5-preview-2025-02-27": 2,
 
     "o1-mini": 4,
     "o1-preview": 4,
@@ -862,6 +862,9 @@ COMPLETION_RATIO = {
     "qwen2-vl-72b-instruct": 5,
     "qwen-max-latest": 4,
     "qwen2.5-max": 4,
+    "qwen-max-2025-01-25": 4,
+
+    "qwen-plus": 2.5,
 
     "qwq-max": 4,
     "qwq-max-search": 4,
@@ -1109,7 +1112,6 @@ REDIRECT_MODEL = {
 
 }
 
-
 GROUP_RATIO = {
     "chatfire": 1,
 
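Illustration (an assumption, not stated in this diff): if these tables follow the usual one-api convention, where MODEL_RATIO scales a per-1K-token base price for prompt tokens and COMPLETION_RATIO multiplies that prompt price for completion tokens, the qwen-plus change (ratio 2 → 0.8, completion ratio 2.5) prices a call roughly like this:

# Hypothetical cost estimate; the 0.002/1K base and the multiply-through rule are assumptions.
BASE_PRICE_PER_1K_TOKENS = 0.002

def estimate_cost(model_ratio: float, completion_ratio: float,
                  prompt_tokens: int, completion_tokens: int) -> float:
    prompt_price = model_ratio * BASE_PRICE_PER_1K_TOKENS  # per 1K prompt tokens
    completion_price = prompt_price * completion_ratio     # per 1K completion tokens
    return prompt_tokens / 1000 * prompt_price + completion_tokens / 1000 * completion_price

# qwen-plus after this diff: MODEL_RATIO 0.8, COMPLETION_RATIO 2.5
print(estimate_cost(0.8, 2.5, prompt_tokens=10_000, completion_tokens=2_000))  # 0.024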
meutils/schemas/openai_types.py CHANGED
@@ -118,25 +118,9 @@ class CompletionRequest(BaseModel):
 
     # 拓展字段
 
-    user_content: Optional[Any] = None # str dict
-
-    system_messages: Optional[list] = None
-    last_content: Optional[Any] = None
-
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-        # if self.last_message.get("role") == "user":
-        #     user_content = self.last_message.get("content")
-        #     if isinstance(user_content, list): # 可能是多模态 todo: 多模态 'image_url','video_url' and 'video' 'file' audio
-        #         for i, c in enumerate(user_content):
-        #             if c.get("type") == "image_url":
-        #                 user_content[i]["type"] = "image_url"
-        #                 user_content[i]["image_url"] = user_content[i].get("image_url", {}).get("url", "")
-        #
-        # self.messages = self.messages or [{'role': 'user', 'content': 'hi'}]
-        # self.system_messages = [m for m in self.messages if m.get("role") == "system"]
-
     class Config:
         extra = "allow"
 
@@ -144,11 +128,37 @@ class CompletionRequest(BaseModel):
     def last_message(self):
         return self.messages and self.messages[-1]
 
-    # @cached_property
-    def last_user_content(self):
-        return "xxxxxxxx"
-        # if self.last_message.get("role") == "user":
-        #     return self.last_message.get("content")
+    @cached_property
+    def last_user_content(self) -> str:
+        for i, message in enumerate(self.messages[::-1], 1):
+            if message.get("role") == "user":
+                user_contents = message.get("content")
+                if isinstance(user_contents, list):
+                    for content in user_contents:
+                        return content.get('text', "")
+                else:
+                    return user_contents
+
+    @cached_property
+    def last_urls(self): # file_url 多轮对话需要 sum(request.last_urls.values(), [])
+        content_types = {"image_url", "file", "file_url", "video_url", "audio_url"}
+        for i, message in enumerate(self.messages[::-1], 1):
+            data = {}
+            if message.get("role") == "user": # 每一轮还要处理
+                user_contents = message.get("content")
+                if isinstance(user_contents, list): # 用户 url
+                    for content in user_contents:
+                        content_type = content.get("type")
+                        if content_type in content_types:
+                            if _url := content.get(content_type, {}): # {"type": "file", "file": fileid}
+                                if isinstance(_url, str): # 兼容了spark qwenai
+                                    url = _url
+                                else:
+                                    url = _url.get("url")
+                                url and data.setdefault(content_type, []).append(url)
+
+            if data: return data
+        return {}
 
 
 class ChatCompletionRequest(BaseModel):
@@ -416,14 +426,29 @@ if __name__ == '__main__':
     #
     #
     # print(A(n=11))
-    # messages = [
-    #     {
-    #         "role": "user",
-    #         "content": [{'role': 'user', 'content': [{"type": "image_url", "image_url": "这是个图片链接"}]}]
-    #     },
-    #
-    #     # {'role': 'user', 'content': [{"type": "image_url", "image_url": {"url": "这是个图片链接"}}]},
-    # ]
+    messages = [
+        {'role': 'user',
+         'content': [{"type": "image_url", "image_url": "https://oss.ffire.cc/files/kling_watermark.png"}]},
+        {'role': 'assistant', 'content': [{"type": "image_url", "image_url": "这是个图片链接"}]},
+
+        {'role': 'assistant', 'content': [{"type": "image_url", "image_url": "这是个图片链接"}]},
+
+        {'role': 'user', 'content': [
+            {"type": "image_url", "image_url": {"url": "这是个图片链接1"}},
+            {"type": "file_url", "file_url": {"url": "这是个file_url"}},
+            {"type": "file_url", "file_url": {"url": "这是个file_url"}},
+            {"type": "file", "file": "这是个fileid"},
+
+            {"type": "audio_url", "audio_url": {"url": "这是个file_url"}},
+            {"type": "video_url", "video_url": {"url": "这是个video_url"}}
+        ]},
+
+        {'role': 'assistant', 'content': [{"type": "image_url", "image_url": "这是个图片链接"}]},
+        {'role': 'user', 'content': [
+            {"type": "text", "text": "这是个文本"},
+            {"type": "image_url", "image_url": "这是个图片链接"}
+        ]},
+    ]
     #
     # r = ChatCompletionRequest(model="gpt-3.5-turbo", messages=messages)
     # r.messages[-1]['content'] = [{"type": "image_url", "image_url": {"url": r.urls[-1]}}]
@@ -433,4 +458,5 @@ if __name__ == '__main__':
     # print(chat_completion)
     # print(chat_completion_chunk_stop)
 
-    print(CompletionRequest().last_user_content())
+    # print(CompletionRequest(messages=messages).last_urls)
+    print(CompletionRequest(messages=messages).last_user_content)
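Illustration (not part of the diff): what the two new cached properties return for a small multimodal request, assuming meutils is importable; the flattening idiom mirrors the in-code comment sum(request.last_urls.values(), []).

# Hypothetical usage sketch for the new last_user_content / last_urls properties.
from meutils.schemas.openai_types import CompletionRequest

messages = [
    {'role': 'user', 'content': [
        {"type": "text", "text": "总结下"},
        {"type": "image_url", "image_url": {"url": "https://example.com/a.png"}},
        {"type": "file_url", "file_url": {"url": "https://example.com/b.pdf"}},
    ]},
]

request = CompletionRequest(messages=messages)
print(request.last_user_content)            # "总结下" (text part of the latest user turn)
print(request.last_urls)                    # {"image_url": ["...a.png"], "file_url": ["...b.pdf"]}
print(sum(request.last_urls.values(), []))  # flattened list of every URL in that turn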
meutils/schemas/yuanbao_types.py CHANGED
@@ -7,6 +7,7 @@
 # @WeChat : meutils
 # @Software : PyCharm
 # @Description :
+import pandas as pd
 
 from meutils.pipe import *
 
@@ -221,6 +222,7 @@ class SSEData(BaseModel):
 
     content: str = ""
     reasoning_content: str = ""
+    search_content: str = ""
 
     image: Optional[str] = None
 
@@ -229,10 +231,11 @@
     def __init__(self, **data):
         super().__init__(**data)
 
-        logger.debug(self.chunk)
+        # logger.debug(self.chunk)
 
         chunk = self.chunk.lstrip("data:")
 
+        content = ""
         if '"type":"progress"' in chunk:
             content = json.loads(chunk).get("msg", "")
 
@@ -254,14 +257,10 @@ class SSEData(BaseModel):
             # df['image'] = "![](" + df['image'] + ")"
 
         elif '{"type":"think"' in chunk: # 思考中...
-            content = self.reasoning_content = json.loads(chunk).get("content", "")
+            self.reasoning_content = json.loads(chunk).get("content", "")
 
         elif '{"type":"searchGuid"' in chunk: # 思考中...
-            content = self.reasoning_content = json.loads(chunk).get("docs", "")
-
-        else:
-            content = ""
-            # chunk.strip() or logger.debug(chunk) # debug
+            self.search_content = json.loads(chunk).get("docs", "")
 
         self.content = content
 
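Illustration (not part of the diff): how the reshaped SSEData routes chunks, based on the '{"type":"think"}' and '{"type":"searchGuid"}' shapes visible above; the payloads here are invented, so treat the exact wire format as an assumption.

# Hypothetical chunks in the shapes the parser matches; field names are taken from the diff.
from meutils.schemas.yuanbao_types import SSEData

think_chunk = 'data: {"type":"think","content":"先查一下南京的天气..."}'
search_chunk = 'data: {"type":"searchGuid","docs":[{"title":"南京天气","url":"https://example.com"}]}'

print(SSEData(chunk=think_chunk).reasoning_content)  # the thinking text; content stays ""
print(SSEData(chunk=search_chunk).search_content)    # docs list later rendered as a markdown table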
meutils/types.py CHANGED
@@ -35,3 +35,5 @@ def is_list_of_strings(lst):
 
 def is_list_of_ints(lst):
     return isinstance(lst, List) and all(isinstance(item, int) for item in lst)
+
+