vectorvein 0.2.44__tar.gz → 0.2.46__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {vectorvein-0.2.44 → vectorvein-0.2.46}/PKG-INFO +1 -1
  2. {vectorvein-0.2.44 → vectorvein-0.2.46}/pyproject.toml +1 -1
  3. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/utils.py +58 -5
  4. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/graph/workflow.py +63 -2
  5. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/file_processing.py +2 -0
  6. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/image_generation.py +4 -0
  7. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/media_editing.py +7 -0
  8. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/media_processing.py +9 -0
  9. {vectorvein-0.2.44 → vectorvein-0.2.46}/README.md +0 -0
  10. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/__init__.py +0 -0
  11. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/api/__init__.py +0 -0
  12. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/api/client.py +0 -0
  13. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/api/exceptions.py +0 -0
  14. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/api/models.py +0 -0
  15. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/__init__.py +0 -0
  16. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/anthropic_client.py +0 -0
  17. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  18. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/base_client.py +0 -0
  19. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  20. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/ernie_client.py +0 -0
  21. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  22. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/groq_client.py +0 -0
  23. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/local_client.py +0 -0
  24. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  25. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  26. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  27. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/openai_client.py +0 -0
  28. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  29. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/py.typed +0 -0
  30. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  31. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  32. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/xai_client.py +0 -0
  33. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/yi_client.py +0 -0
  34. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  35. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/py.typed +0 -0
  36. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/server/token_server.py +0 -0
  37. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/settings/__init__.py +0 -0
  38. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/settings/py.typed +0 -0
  39. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/types/__init__.py +0 -0
  40. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/types/defaults.py +0 -0
  41. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/types/enums.py +0 -0
  42. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/types/exception.py +0 -0
  43. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/types/llm_parameters.py +0 -0
  44. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/types/py.typed +0 -0
  45. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/types/settings.py +0 -0
  46. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/utilities/media_processing.py +0 -0
  47. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/utilities/rate_limiter.py +0 -0
  48. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/utilities/retry.py +0 -0
  49. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/graph/edge.py +0 -0
  50. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/graph/node.py +0 -0
  51. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/graph/port.py +0 -0
  52. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  53. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  54. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  55. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/llms.py +0 -0
  56. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/output.py +0 -0
  57. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  58. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  59. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/tools.py +0 -0
  60. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  61. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  62. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  63. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  64. {vectorvein-0.2.44 → vectorvein-0.2.46}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.2.44
+Version: 0.2.46
 Summary: VectorVein Python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
pyproject.toml
@@ -17,7 +17,7 @@ description = "VectorVein Python SDK"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.2.44"
+version = "0.2.46"

 [project.license]
 text = "MIT"
src/vectorvein/chat_clients/utils.py
@@ -489,7 +489,9 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
                     "type": "function",
                     "function": {
                         "name": message["metadata"]["selected_workflow"]["function_name"],
-                        "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
+                        "arguments": json.dumps(
+                            message["metadata"]["selected_workflow"]["params"], ensure_ascii=False
+                        ),
                     },
                 }
             ],
@@ -513,7 +515,7 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
             "content": json.dumps(
                 {
                     "name": message["metadata"]["selected_workflow"]["function_name"],
-                    "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"]),
+                    "arguments": json.dumps(message["metadata"]["selected_workflow"]["params"], ensure_ascii=False),
                 },
                 ensure_ascii=False,
             ),
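Both hunks switch the nested json.dumps calls to ensure_ascii=False, so non-ASCII workflow parameters are serialized as readable characters instead of \u escape sequences. A minimal illustration of the difference, using plain standard-library Python rather than package code:

    import json

    params = {"city": "北京", "mode": "fast"}

    # Default behaviour escapes non-ASCII characters.
    print(json.dumps(params))                      # {"city": "\u5317\u4eac", "mode": "fast"}

    # With ensure_ascii=False the original characters are preserved,
    # keeping tool-call arguments readable.
    print(json.dumps(params, ensure_ascii=False))  # {"city": "北京", "mode": "fast"}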
@@ -559,7 +561,12 @@ def format_workflow_messages(message: dict, content: str, backend: BackendType):
     return formatted_messages


-def transform_from_openai_message(message: ChatCompletionMessageParam, backend: BackendType):
+def transform_from_openai_message(
+    message: ChatCompletionMessageParam,
+    backend: BackendType,
+    native_multimodal: bool = False,
+    function_call_available: bool = False,
+):
     role = message.get("role", "user")
     content = message.get("content", "")
     tool_calls = message.get("tool_calls", [])
@@ -613,13 +620,57 @@ def transform_from_openai_message(message: ChatCompletionMessageParam, backend:
         else:
             return {"role": role, "content": content}
     else:
-        return message  # 对于其他后端,保持原样
+        if isinstance(content, str):
+            if not function_call_available:
+                if message["role"] == "tool":
+                    return {
+                        "role": "user",
+                        "content": f"Tool<tool_call_id: {message['tool_call_id']}> Call result: {content}",
+                    }
+                elif message["role"] == "assistant":
+                    if tool_calls:
+                        tool_calls_content = json.dumps(tool_calls, ensure_ascii=False)
+                        content += f"Tool calls: {tool_calls_content}"
+                    return {"role": role, "content": content}
+
+            return message
+        elif isinstance(content, list):
+            formatted_content = []
+            for item in content:
+                if item["type"] == "image_url" and not native_multimodal:
+                    formatted_content.append({"type": "text", "text": f"Image<image_url: {item['image_url']['url']}>"})
+                else:
+                    formatted_content.append(item)
+
+            if not function_call_available:
+                if message["role"] == "tool":
+                    role = "user"
+                    formatted_content = [
+                        {
+                            "type": "text",
+                            "text": f"Tool<tool_call_id: {message['tool_call_id']}> Call result: {formatted_content}",
+                        }
+                    ]
+                elif message["role"] == "assistant":
+                    if tool_calls:
+                        tool_calls_content = json.dumps(tool_calls, ensure_ascii=False)
+                        formatted_content.append({"type": "text", "text": f"Tool calls: {tool_calls_content}"})
+
+                return {"role": role, "content": formatted_content}
+
+            if tool_calls:
+                return {"role": role, "content": formatted_content, "tool_calls": tool_calls}
+            else:
+                return {"role": role, "content": formatted_content}
+        else:
+            return message


 def format_messages(
     messages: list,
     backend: BackendType = BackendType.OpenAI,
     native_multimodal: bool = False,
+    function_call_available: bool = False,
 ) -> list:
     """将 VectorVein 和 OpenAI 的 Message 序列化后的格式转换为不同模型支持的格式

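The new native_multimodal and function_call_available parameters let the generic fallback branch degrade messages for backends without tool calling or native image input: tool results are rewritten as plain user messages, assistant tool calls are appended as text, and image_url parts become text placeholders. A rough sketch of the tool-message case; the BackendType import location and member are assumptions, and it presumes the chosen backend reaches the generic else branch shown above:

    from vectorvein.chat_clients.utils import transform_from_openai_message
    from vectorvein.types.enums import BackendType  # assumed location of BackendType

    tool_message = {"role": "tool", "tool_call_id": "call_123", "content": "42"}

    converted = transform_from_openai_message(
        tool_message,
        backend=BackendType.Moonshot,  # any backend handled by the generic branch
        function_call_available=False,
    )
    # Expected shape per the added branch:
    # {"role": "user", "content": "Tool<tool_call_id: call_123> Call result: 42"}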
 
@@ -651,7 +702,9 @@ def format_messages(
             formatted_messages.extend(format_workflow_messages(message, content, backend))
         else:
             # 处理 OpenAI 格式的消息
-            formatted_message = transform_from_openai_message(message, backend)
+            formatted_message = transform_from_openai_message(
+                message, backend, native_multimodal, function_call_available
+            )
             formatted_messages.append(formatted_message)

     return formatted_messages
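format_messages itself just threads the new flag through to transform_from_openai_message for every OpenAI-format message. A rough usage sketch; the message contents are invented and the backend-specific branches are not shown in this diff:

    from vectorvein.chat_clients.utils import format_messages
    from vectorvein.types.enums import BackendType  # assumed location of BackendType

    messages = [
        {"role": "user", "content": "What's the weather in Beijing?"},
        {"role": "tool", "tool_call_id": "call_123", "content": "Sunny, 25°C"},
    ]

    formatted = format_messages(
        messages,
        backend=BackendType.OpenAI,
        native_multimodal=False,
        function_call_available=True,  # set to False for backends without tool calling
    )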
src/vectorvein/workflow/graph/workflow.py
@@ -3,13 +3,23 @@ from typing import List, Union, TypedDict

 from .node import Node
 from .edge import Edge
+from .port import InputPort


-class WorkflowCheckResult(TypedDict):
+class UIWarning(TypedDict, total=False):
+    """UI警告类型。"""
+
+    input_ports_shown_but_connected: list[dict]  # 显示的输入端口但被连接
+    has_shown_input_ports: bool  # 是否存在显示的输入端口
+    has_output_nodes: bool  # 是否存在输出节点
+
+
+class WorkflowCheckResult(TypedDict, total=False):
     """工作流检查结果类型。"""

     no_cycle: bool  # 工作流是否不包含环
     no_isolated_nodes: bool  # 工作流是否不包含孤立节点
+    ui_warnings: UIWarning  # UI相关警告


 class Workflow:
@@ -215,10 +225,61 @@ class Workflow:

         return result

+    def _check_ui(self) -> UIWarning:
+        """
+        检查工作流的 UI 情况。
+        以下情况会警告:
+        1. 某个输入端口的 show=True,但是又有连线连接到该端口(实际运行时会被覆盖)。
+        2. 整个工作流没有任何输入端口是 show=True 的,说明没有让用户输入的地方。
+        3. 整个工作流没有任何输出节点,这样工作流结果无法呈现。
+        """
+        warnings: UIWarning = {
+            "input_ports_shown_but_connected": [],
+            "has_shown_input_ports": False,
+            "has_output_nodes": False,
+        }
+
+        # 检查是否有任何显示的输入端口
+        has_shown_input_ports = False
+
+        # 找出所有连接的目标端口
+        connected_ports = {(edge.target, edge.target_handle) for edge in self.edges}
+
+        # 遍历所有节点
+        for node in self.nodes:
+            # 检查是否为输出节点
+            if hasattr(node, "category") and node.category == "outputs":
+                warnings["has_output_nodes"] = True
+
+            # 检查节点的输入端口
+            for port_name in node.ports.keys() if hasattr(node, "ports") else []:
+                port = node.ports.get(port_name)
+                # 确保是输入端口且设置为显示
+                if hasattr(port, "show") and getattr(port, "show", False) and isinstance(port, InputPort):
+                    has_shown_input_ports = True
+
+                    # 检查显示的端口是否也被连接
+                    if (node.id, port_name) in connected_ports:
+                        warnings["input_ports_shown_but_connected"].append(
+                            {"node_id": node.id, "node_type": node.type, "port_name": port_name}
+                        )
+
+        # 如果没有任何显示的输入端口
+        warnings["has_shown_input_ports"] = has_shown_input_ports
+
+        return warnings
+
     def check(self) -> WorkflowCheckResult:
         """检查流程图的有效性。

         Returns:
             WorkflowCheckResult: 包含各种检查结果的字典
         """
-        return self._check_dag()
+        dag_check = self._check_dag()
+        ui_check = self._check_ui()
+
+        # 合并结果
+        result: WorkflowCheckResult = dag_check
+        result["ui_warnings"] = ui_check
+
+        return result
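check() now returns the UI warnings alongside the DAG checks, so callers can surface structural and UI issues from a single call. A hedged sketch of consuming the merged result; it assumes Workflow() can be constructed empty and populated elsewhere:

    from vectorvein.workflow.graph.workflow import Workflow

    workflow = Workflow()  # assumed no-arg constructor
    # ... nodes and edges added here ...

    result = workflow.check()

    if not result.get("no_cycle", True):
        print("workflow contains a cycle")
    if not result.get("no_isolated_nodes", True):
        print("workflow contains isolated nodes")

    ui = result.get("ui_warnings", {})
    if not ui.get("has_output_nodes", False):
        print("no output node: results cannot be displayed")
    if not ui.get("has_shown_input_ports", False):
        print("no visible input ports: nothing for the user to fill in")
    for item in ui.get("input_ports_shown_but_connected", []):
        print(f"{item['node_type']}.{item['port_name']} is shown but also has an incoming edge")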
src/vectorvein/workflow/nodes/file_processing.py
@@ -17,6 +17,7 @@ class FileLoader(Node):
                 port_type=PortType.FILE,
                 value=list(),
                 multiple=True,
+                show=True,
             ),
             "parse_quality": InputPort(
                 name="parse_quality",
@@ -75,6 +76,7 @@ class FileUpload(Node):
                 value=list(),
                 support_file_types=["*/*"],
                 multiple=True,
+                show=True,
             ),
             "unzip_files": InputPort(
                 name="unzip_files",
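These show=True additions (and the ones in the node files below) mark each node's primary file or image input as visible by default, which is exactly what the new _check_ui() inspects when computing has_shown_input_ports. A hedged sketch; node construction and add_node are assumptions based on typical usage and are not shown in this diff:

    from vectorvein.workflow.graph.workflow import Workflow
    from vectorvein.workflow.nodes.file_processing import FileLoader

    workflow = Workflow()            # assumed no-arg constructor
    workflow.add_node(FileLoader())  # assumed helper for registering a node

    ui = workflow.check()["ui_warnings"]
    print(ui["has_shown_input_ports"])  # expected True: the "files" port now has show=True
    print(ui["has_output_nodes"])       # False until an output node is added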
src/vectorvein/workflow/nodes/image_generation.py
@@ -18,6 +18,7 @@ class BackgroundGeneration(Node):
                 value=list(),
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 multiple=True,
+                show=True,
             ),
             "remove_background": InputPort(
                 name="remove_background",
@@ -266,6 +267,7 @@ class Inpainting(Node):
                 value=list(),
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 multiple=True,
+                show=True,
             ),
             "inpainting_method": InputPort(
                 name="inpainting_method",
@@ -402,12 +404,14 @@ class Pulid(Node):
                 value=list(),
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 multiple=True,
+                show=True,
             ),
             "prompt": InputPort(
                 name="prompt",
                 port_type=PortType.TEXTAREA,
                 value="",
                 multiple=True,
+                show=True,
             ),
             "negative_prompt": InputPort(
                 name="negative_prompt",
src/vectorvein/workflow/nodes/media_editing.py
@@ -18,6 +18,7 @@ class AudioEditing(Node):
                 value=list(),
                 support_file_types=[".mp3", ".wav", ".ogg", ".m4a"],
                 multiple=True,
+                show=True,
             ),
             "audio_processing_logic": InputPort(
                 name="audio_processing_logic",
@@ -166,6 +167,7 @@ class ImageBackgroundRemoval(Node):
                 value=list(),
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 multiple=True,
+                show=True,
             ),
             "remove_background_method": InputPort(
                 name="remove_background_method",
@@ -223,6 +225,7 @@ class ImageEditing(Node):
                 value=list(),
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 multiple=True,
+                show=True,
             ),
             "crop": InputPort(
                 name="crop",
@@ -365,6 +368,7 @@ class ImageSegmentation(Node):
                 value=list(),
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 multiple=True,
+                show=True,
             ),
             "selection_method": InputPort(
                 name="selection_method",
@@ -426,6 +430,7 @@ class ImageWatermark(Node):
                 value=list(),
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 multiple=True,
+                show=True,
             ),
             "image_or_text": InputPort(
                 name="image_or_text",
@@ -559,6 +564,7 @@ class VideoEditing(Node):
                 value=list(),
                 support_file_types=["video/*"],
                 multiple=True,
+                show=True,
             ),
             "video_processing_logic": InputPort(
                 name="video_processing_logic",
@@ -637,6 +643,7 @@ class VideoScreenshot(Node):
                 value=list(),
                 support_file_types=["video/*"],
                 multiple=True,
+                show=True,
             ),
             "screenshot_method": InputPort(
                 name="screenshot_method",
src/vectorvein/workflow/nodes/media_processing.py
@@ -51,6 +51,7 @@ class ClaudeVision(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.get('images_or_urls') == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -100,6+101,7 @@ class DeepseekVl(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.get('images_or_urls') == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -159,6 +161,7 @@ class GeminiVision(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.get('images_or_urls') == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -210,6 +213,7 @@ class GlmVision(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.images_or_urls.value == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -260,6 +264,7 @@ class GptVision(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.get('images_or_urls') == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -320,6 +325,7 @@ class InternVision(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.get('images_or_urls') == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -366,6 +372,7 @@ class Ocr(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.get('images_or_urls') == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -439,6 +446,7 @@ class QwenVision(Node):
                 multiple=True,
                 support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                 condition="fields_data.get('images_or_urls') == 'images'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",
@@ -475,6 +483,7 @@ class SpeechRecognition(Node):
                 multiple=True,
                 support_file_types=[".wav", ".mp3", ".mp4", ".m4a", ".wma", ".aac", ".ogg", ".amr", ".flac"],
                 condition="fields_data.get('files_or_urls') == 'files'",
+                show=True,
             ),
             "urls": InputPort(
                 name="urls",