vectorvein 0.2.85__tar.gz → 0.2.87__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {vectorvein-0.2.85 → vectorvein-0.2.87}/PKG-INFO +1 -1
  2. {vectorvein-0.2.85 → vectorvein-0.2.87}/pyproject.toml +1 -1
  3. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/openai_compatible_client.py +10 -28
  4. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/utils.py +53 -97
  5. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/utilities/media_processing.py +62 -3
  6. {vectorvein-0.2.85 → vectorvein-0.2.87}/README.md +0 -0
  7. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/__init__.py +0 -0
  8. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/api/__init__.py +0 -0
  9. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/api/client.py +0 -0
  10. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/api/exceptions.py +0 -0
  11. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/api/models.py +0 -0
  12. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/__init__.py +0 -0
  13. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  14. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  15. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/base_client.py +0 -0
  16. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  17. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/ernie_client.py +0 -0
  18. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  19. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/groq_client.py +0 -0
  20. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/local_client.py +0 -0
  21. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  22. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  23. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  24. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/openai_client.py +0 -0
  25. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/py.typed +0 -0
  26. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  27. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  28. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/xai_client.py +0 -0
  29. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/yi_client.py +0 -0
  30. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  31. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/py.typed +0 -0
  32. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/server/token_server.py +0 -0
  33. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/settings/__init__.py +0 -0
  34. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/settings/py.typed +0 -0
  35. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/types/__init__.py +0 -0
  36. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/types/defaults.py +0 -0
  37. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/types/enums.py +0 -0
  38. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/types/exception.py +0 -0
  39. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/types/llm_parameters.py +0 -0
  40. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/types/py.typed +0 -0
  41. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/types/settings.py +0 -0
  42. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/utilities/rate_limiter.py +0 -0
  43. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/utilities/retry.py +0 -0
  44. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/edge.py +0 -0
  45. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/node.py +0 -0
  46. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/port.py +0 -0
  47. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/graph/workflow.py +0 -0
  48. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  49. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  50. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  51. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  52. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  53. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/llms.py +0 -0
  54. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  55. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  56. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/output.py +0 -0
  57. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  58. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  59. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/tools.py +0 -0
  60. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  61. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  62. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  63. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  64. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/analyse.py +0 -0
  65. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/check.py +0 -0
  66. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
  67. {vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/workflow/utils/layout.py +0 -0
{vectorvein-0.2.85 → vectorvein-0.2.87}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.2.85
+Version: 0.2.87
 Summary: VectorVein Python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
{vectorvein-0.2.85 → vectorvein-0.2.87}/pyproject.toml
@@ -17,7 +17,7 @@ description = "VectorVein Python SDK"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.2.85"
+version = "0.2.87"
 
 [project.license]
 text = "MIT"
{vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/openai_compatible_client.py
@@ -99,7 +99,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
            return AzureOpenAI(
                azure_endpoint=self.endpoint.api_base,
                api_key=self.endpoint.api_key,
-               api_version="2025-03-01-preview",
+               api_version="2025-04-01-preview",
                http_client=self.http_client,
            )
        else:
@@ -327,9 +327,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
        if not max_tokens and not max_completion_tokens:
            max_output_tokens = self.model_setting.max_output_tokens
            native_multimodal = self.model_setting.native_multimodal
-           token_counts = get_message_token_counts(
-               messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-           )
+           token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
            if max_output_tokens is not None:
                max_tokens = self.model_setting.context_length - token_counts - 64
                max_tokens = min(max(max_tokens, 1), max_output_tokens)
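The fallback above budgets the response from whatever context is left: it subtracts the prompt's token count plus a 64-token safety margin from the context window, then clamps the result between 1 and the model's output cap. A standalone sketch of that arithmetic with made-up numbers (the values below are assumptions for illustration, not settings from the SDK):

# Illustrative numbers only; real values come from model_setting in the SDK.
context_length = 128_000    # hypothetical context window
max_output_tokens = 4_096   # hypothetical per-response output cap
token_counts = 120_500      # hypothetical prompt size

max_tokens = context_length - token_counts - 64          # 7_436 tokens of headroom here
max_tokens = min(max(max_tokens, 1), max_output_tokens)
print(max_tokens)  # prints 4096; shrinks once the prompt approaches the context limit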
@@ -373,9 +371,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
                extra_body=extra_body,
                timeout=timeout,
                stream_options=stream_options,
-               response_format=response_format
-               if response_format and self.model_setting.response_format_available
-               else OPENAI_NOT_GIVEN,
+               response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                **tools_params,  # type: ignore
            )
 
@@ -501,9 +497,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
                extra_body=extra_body,
                timeout=timeout,
                stream_options=stream_options,
-               response_format=response_format
-               if response_format and self.model_setting.response_format_available
-               else OPENAI_NOT_GIVEN,
+               response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                **tools_params,  # type: ignore
            )
 
@@ -524,10 +518,7 @@ class OpenAICompatibleChatClient(BaseChatClient):
 
        if tools:
            if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
-               result["tool_calls"] = [
-                   {**tool_call.model_dump(), "type": "function"}
-                   for tool_call in response.choices[0].message.tool_calls
-               ]
+               result["tool_calls"] = [{**tool_call.model_dump(), "type": "function"} for tool_call in response.choices[0].message.tool_calls]
            else:
                tool_call_content_processor = ToolCallContentProcessor(result["content"])
                tool_call_data = tool_call_content_processor.tool_calls
@@ -581,7 +572,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
            return AsyncAzureOpenAI(
                azure_endpoint=self.endpoint.api_base,
                api_key=self.endpoint.api_key,
-               api_version="2025-03-01-preview",
+               api_version="2025-04-01-preview",
                http_client=self.http_client,
            )
        else:
@@ -809,9 +800,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
        if not max_tokens and not max_completion_tokens:
            max_output_tokens = self.model_setting.max_output_tokens
            native_multimodal = self.model_setting.native_multimodal
-           token_counts = get_message_token_counts(
-               messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal
-           )
+           token_counts = get_message_token_counts(messages=messages, tools=tools, model=self.model, native_multimodal=native_multimodal)
            if max_output_tokens is not None:
                max_tokens = self.model_setting.context_length - token_counts - 64
                max_tokens = min(max(max_tokens, 1), max_output_tokens)
@@ -855,9 +844,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                extra_body=extra_body,
                timeout=timeout,
                stream_options=stream_options,
-               response_format=response_format
-               if response_format and self.model_setting.response_format_available
-               else OPENAI_NOT_GIVEN,
+               response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                **tools_params,  # type: ignore
            )
 
@@ -983,9 +970,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
                extra_body=extra_body,
                timeout=timeout,
                stream_options=stream_options,
-               response_format=response_format
-               if response_format and self.model_setting.response_format_available
-               else OPENAI_NOT_GIVEN,
+               response_format=response_format if response_format and self.model_setting.response_format_available else OPENAI_NOT_GIVEN,
                **tools_params,  # type: ignore
            )
 
@@ -1006,10 +991,7 @@ class AsyncOpenAICompatibleChatClient(BaseAsyncChatClient):
 
        if tools:
            if self.model_setting.function_call_available and response.choices[0].message.tool_calls:
-               result["tool_calls"] = [
-                   {**tool_call.model_dump(), "type": "function"}
-                   for tool_call in response.choices[0].message.tool_calls
-               ]
+               result["tool_calls"] = [{**tool_call.model_dump(), "type": "function"} for tool_call in response.choices[0].message.tool_calls]
            else:
                tool_call_content_processor = ToolCallContentProcessor(result["content"])
                tool_call_data = tool_call_content_processor.tool_calls
{vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/chat_clients/utils.py
@@ -103,18 +103,8 @@ def convert_type(value, value_type):
 
 def get_token_counts(text: str | dict, model: str = "", use_token_server_first: bool = True) -> int:
     if use_token_server_first and settings.token_server is not None:
-        base_url = (
-            settings.token_server.url
-            if settings.token_server.url is not None
-            else f"http://{settings.token_server.host}:{settings.token_server.port}"
-        )
-        _, response = (
-            Retry(httpx.post)
-            .args(url=f"{base_url}/count_tokens", json={"text": text, "model": model}, timeout=None)
-            .retry_times(5)
-            .sleep_time(1)
-            .run()
-        )
+        base_url = settings.token_server.url if settings.token_server.url is not None else f"http://{settings.token_server.host}:{settings.token_server.port}"
+        _, response = Retry(httpx.post).args(url=f"{base_url}/count_tokens", json={"text": text, "model": model}, timeout=None).retry_times(5).sleep_time(1).run()
         if response is not None:
             try:
                 result = response.json()
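This and the following hunks only collapse multi-line expressions onto single lines; the behaviour is unchanged. They all rely on the SDK's fluent Retry helper, whose pattern is visible in the diff itself: wrap a callable, bind its arguments with .args(), configure .retry_times() and .sleep_time(), then .run(), which returns a tuple whose second element is the response (or None once all attempts fail). A small sketch of the same call against a hypothetical local token server, assuming Retry remains importable from vectorvein.utilities.retry:

import httpx
from vectorvein.utilities.retry import Retry  # import path assumed from the package layout

# Mirror the token-server call from the diff: up to 5 attempts, 1 second between them.
_, response = (
    Retry(httpx.post)
    .args(url="http://localhost:8000/count_tokens", json={"text": "hello", "model": "gpt-4o"}, timeout=None)
    .retry_times(5)
    .sleep_time(1)
    .run()
)
if response is not None:
    print(response.json())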
@@ -147,13 +137,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             ],
         }
 
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -174,13 +158,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -194,11 +172,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             endpoint_id = endpoint_id["endpoint_id"]
         endpoint = settings.get_endpoint(endpoint_id)
 
-        api_base = (
-            endpoint.api_base.removesuffix("/openai/")
-            if endpoint.api_base
-            else "https://generativelanguage.googleapis.com/v1beta"
-        )
+        api_base = endpoint.api_base.removesuffix("/openai/") if endpoint.api_base else "https://generativelanguage.googleapis.com/v1beta"
         base_url = f"{api_base}/models/{backend_setting.id}:countTokens"
         params = {"key": endpoint.api_key}
         request_body = {
@@ -209,13 +183,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 ],
             },
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(base_url, json=request_body, params=params, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(base_url, json=request_body, params=params, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -230,12 +198,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
             endpoint_id = endpoint_choice
         endpoint = settings.get_endpoint(endpoint_id)
 
-        if (
-            endpoint.is_vertex
-            or endpoint.is_bedrock
-            or endpoint.endpoint_type == "anthropic_vertex"
-            or endpoint.endpoint_type == "anthropic_bedrock"
-        ):
+        if endpoint.is_vertex or endpoint.is_bedrock or endpoint.endpoint_type == "anthropic_vertex" or endpoint.endpoint_type == "anthropic_bedrock":
             continue
         elif endpoint.endpoint_type in ("default", "anthropic"):
             return (
@@ -277,13 +240,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -306,13 +263,7 @@ def get_token_counts(text: str | dict, model: str = "", use_token_server_first:
                 {"role": "user", "content": text},
             ],
         }
-        _, response = (
-            Retry(httpx.post)
-            .args(url=tokenize_url, headers=headers, json=request_body, timeout=None)
-            .retry_times(5)
-            .sleep_time(10)
-            .run()
-        )
+        _, response = Retry(httpx.post).args(url=tokenize_url, headers=headers, json=request_body, timeout=None).retry_times(5).sleep_time(10).run()
         if response is None:
             return 1000
         result = response.json()
@@ -453,48 +404,52 @@ def cutoff_messages(
        return system_message + messages
 
 
-def format_image_message(image: str, backend: BackendType = BackendType.OpenAI) -> dict:
-    from ..utilities.media_processing import ImageProcessor
+def format_image_message(image: str, backend: BackendType = BackendType.OpenAI, process_image: bool = True) -> dict:
+    if process_image:
+        from ..utilities.media_processing import ImageProcessor
 
-    image_processor = ImageProcessor(image_source=image)
-    if backend == BackendType.OpenAI:
-        return {
-            "type": "image_url",
-            "image_url": {"url": image_processor.data_url},
-        }
-    elif backend == BackendType.Anthropic:
-        return {
-            "type": "image",
-            "source": {
-                "type": "base64",
-                "media_type": image_processor.mime_type,
-                "data": image_processor.base64_image,
-            },
-        }
+        image_processor = ImageProcessor(image_source=image)
+        if backend == BackendType.OpenAI:
+            return {
+                "type": "image_url",
+                "image_url": {"url": image_processor.data_url},
+            }
+        elif backend == BackendType.Anthropic:
+            return {
+                "type": "image",
+                "source": {
+                    "type": "base64",
+                    "media_type": image_processor.mime_type,
+                    "data": image_processor.base64_image,
+                },
+            }
+        else:
+            return {
+                "type": "image_url",
+                "image_url": {"url": image_processor.data_url},
+            }
    else:
-        return {
-            "type": "image_url",
-            "image_url": {"url": image_processor.data_url},
-        }
+        if backend == BackendType.Anthropic:
+            return {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": image}}
+        else:
+            return {"type": "image_url", "image_url": {"url": image}}
 
 
 def format_workflow_messages(message: dict, content: str, backend: BackendType):
     formatted_messages = []
 
     # Tool call message
-    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini):
+    if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini, BackendType.DeepSeek):
         tool_call_message = {
-            "content": None,
+            "content": content,
             "role": "assistant",
             "tool_calls": [
                 {
-                    "id": message["metadata"]["selected_workflow"]["tool_call_id"],
+                    "id": message["metadata"]["selected_workflow"]["tool_call_id"] or message["metadata"]["record_id"],
                     "type": "function",
                     "function": {
                         "name": message["metadata"]["selected_workflow"]["function_name"],
-                        "arguments": json.dumps(
-                            message["metadata"]["selected_workflow"]["params"], ensure_ascii=False
-                        ),
+                        "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"], ensure_ascii=False),
                     },
                 }
             ],
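A usage note on the new process_image flag (a sketch inferred from the hunk above, not documented API; the import paths are assumptions based on the package layout): with process_image=True the string is run through ImageProcessor and re-encoded, while process_image=False forwards it verbatim, so it should already be a usable URL or data URL, or raw base64 JPEG data for the Anthropic backend.

# Sketch only; behaviour and import paths are inferred from the diff, not from documentation.
from vectorvein.chat_clients.utils import format_image_message
from vectorvein.types.enums import BackendType

# Processed path: ImageProcessor fetches and re-encodes the source before building the payload.
processed = format_image_message("https://example.com/cat.png", backend=BackendType.OpenAI)

# Pass-through path: the caller's string is used as-is (here, an already-prepared data URL).
raw = format_image_message("data:image/jpeg;base64,...", backend=BackendType.OpenAI, process_image=False)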
@@ -527,10 +482,10 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
        formatted_messages.append(tool_call_message)
 
    # Tool call result message
-   if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini):
+   if backend in (BackendType.OpenAI, BackendType.ZhiPuAI, BackendType.Mistral, BackendType.Yi, BackendType.Gemini, BackendType.DeepSeek):
        tool_call_result_message = {
            "role": "tool",
-           "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"],
+           "tool_call_id": message["metadata"]["selected_workflow"]["tool_call_id"] or message["metadata"]["record_id"],
            "name": message["metadata"]["selected_workflow"]["function_name"],
            "content": message["metadata"].get("workflow_result", ""),
        }
@@ -558,8 +513,8 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
        }
        formatted_messages.append(tool_call_result_message)
 
-   if content and backend not in (BackendType.Mistral, BackendType.Anthropic):
-       formatted_messages.append({"role": "assistant", "content": content})
+   # if content and backend not in (BackendType.Mistral, BackendType.Anthropic):
+   #     formatted_messages.append({"role": "assistant", "content": content})
 
    return formatted_messages
 
@@ -569,6 +524,7 @@ def transform_from_openai_message(
    backend: BackendType,
    native_multimodal: bool = False,
    function_call_available: bool = False,
+   process_image: bool = True,
 ):
    role = message.get("role", "user")
    content = message.get("content", "")
@@ -593,7 +549,7 @@ def transform_from_openai_message(
                formatted_content.append({"type": "text", "text": item})
            elif isinstance(item, dict) and "type" in item:
                if item["type"] == "image_url":
-                   formatted_content.append(format_image_message(item["image_url"]["url"], backend))
+                   formatted_content.append(format_image_message(item["image_url"]["url"], backend, process_image))
                else:
                    formatted_content.append(item)
        if tool_calls:
@@ -674,6 +630,7 @@ def format_messages(
    backend: BackendType = BackendType.OpenAI,
    native_multimodal: bool = False,
    function_call_available: bool = False,
+   process_image: bool = True,
 ) -> list:
    """Convert serialized VectorVein / OpenAI messages into the formats supported by different model backends
 
@@ -681,6 +638,8 @@ def format_messages(
        messages (list): VectorVein Or OpenAI messages list.
        backend (str, optional): Messages format target backend. Defaults to BackendType.OpenAI.
        native_multimodal (bool, optional): Use native multimodal ability. Defaults to False.
+       function_call_available (bool, optional): Use function call ability. Defaults to False.
+       process_image (bool, optional): Process image. Defaults to True.
 
    Returns:
        list: The converted messages list.
@@ -697,17 +656,13 @@ def format_messages(
            content = message["content"]["text"]
            if message["content_type"] == "TXT":
                role = "user" if message["author_type"] == "U" else "assistant"
-               formatted_message = format_text_message(
-                   content, role, message.get("attachments", []), backend, native_multimodal
-               )
+               formatted_message = format_text_message(content, role, message.get("attachments", []), backend, native_multimodal, process_image)
                formatted_messages.append(formatted_message)
            elif message["content_type"] == "WKF" and message["status"] in ("S", "R"):
                formatted_messages.extend(format_workflow_messages(message, content, backend))
        else:
            # Handle messages in OpenAI format
-           formatted_message = transform_from_openai_message(
-               message, backend, native_multimodal, function_call_available
-           )
+           formatted_message = transform_from_openai_message(message, backend, native_multimodal, function_call_available, process_image)
            formatted_messages.append(formatted_message)
 
    return formatted_messages
@@ -719,6 +674,7 @@ def format_text_message(
    attachments: list,
    backend: BackendType,
    native_multimodal: bool,
+   process_image: bool = True,
 ):
    images_extensions = ("jpg", "jpeg", "png", "bmp")
    has_images = any(attachment.lower().endswith(images_extensions) for attachment in attachments)
@@ -733,7 +689,7 @@ def format_text_message(
            "content": [
                {"type": "text", "text": content},
                *[
-                   format_image_message(image=attachment, backend=backend)
+                   format_image_message(image=attachment, backend=backend, process_image=process_image)
                    for attachment in attachments
                    if attachment.lower().endswith(images_extensions)
                ],
{vectorvein-0.2.85 → vectorvein-0.2.87}/src/vectorvein/utilities/media_processing.py
@@ -3,6 +3,7 @@
 import base64
 from io import BytesIO
 from pathlib import Path
+from typing import Literal
 from functools import cached_property
 
 import httpx
@@ -93,6 +94,66 @@ class ImageProcessor:
        if scale_factor < 0.1:
            return img_bytes_resized
 
+   def _clear_cache(self):
+       self._cached_bytes = None
+       self._cached_base64_image = None
+
+   def convert_format(self, target_format: Literal["JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"]):
+       """
+       Convert the image to a different format.
+
+       This method converts the image to the specified format. Supported formats include 'JPEG', 'PNG', 'GIF', 'BMP', 'WEBP', etc.
+
+       Parameters:
+       -----
+       target_format : str
+           String name of the target format, case-insensitive, e.g. 'png', 'jpeg', 'gif'.
+
+       Returns:
+       -----
+       self
+           Returns the instance so calls can be chained.
+
+       Raises:
+       -----
+       ValueError
+           If the specified format is not supported.
+
+       Examples:
+       -----
+       >>> img.convert_format('png')   # Convert the image to PNG
+       >>> img.convert_format('JPEG')  # Convert the image to JPEG
+       """
+
+       # Check whether the format is supported
+       supported_formats = ["JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"]
+       if target_format not in supported_formats:
+           raise ValueError(f"不支持的格式: {target_format}。支持的格式有: {', '.join(supported_formats)}")
+
+       # Skip the conversion if the current format already matches the target
+       if self._image.format == target_format:
+           return self
+
+       # Create a new byte buffer
+       img_bytes = BytesIO()
+
+       # Handle transparency
+       if target_format == "JPEG" and self._image.mode in ("RGBA", "LA"):
+           # JPEG does not support an alpha channel, so convert to RGB
+           self._image = self._image.convert("RGB")
+
+       # Save in the new format
+       self._image.save(img_bytes, format=target_format, optimize=True)
+       img_bytes.seek(0)
+
+       # Reload the image in the new format
+       self._image = Image.open(img_bytes)
+       self._image_format = target_format
+
+       # Clear cached representations
+       self._clear_cache()
+       return self
+
    @property
    def bytes(self):
        if self._cached_bytes is not None:
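The new convert_format method re-encodes the in-memory image and resets the cached bytes/base64 representations. A minimal usage sketch (the file name is made up, and passing a local path as image_source is an assumption based on the constructor call shown elsewhere in the diff; note also that, as written, only the uppercase format names pass the membership check despite the docstring's case-insensitivity claim):

# Sketch only; the file name is hypothetical and ImageProcessor's accepted sources are assumed.
from vectorvein.utilities.media_processing import ImageProcessor

img = ImageProcessor(image_source="photo.png")
img.convert_format("JPEG")    # RGBA/LA input is flattened to RGB before the JPEG save
data_url = img.data_url       # caches were cleared, so this re-encodes in the new format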
@@ -102,9 +163,7 @@ class ImageProcessor:
        img_bytes = BytesIO()
 
        # Check whether the image has an alpha channel
-       has_transparency = self._image.mode in ("RGBA", "LA") or (
-           self._image.mode == "P" and "transparency" in self._image.info
-       )
+       has_transparency = self._image.mode in ("RGBA", "LA") or (self._image.mode == "P" and "transparency" in self._image.info)
 
        if has_transparency:
            # If the image has an alpha channel, use the PNG format