vectorvein 0.2.25__tar.gz → 0.2.26__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {vectorvein-0.2.25 → vectorvein-0.2.26}/PKG-INFO +1 -1
  2. {vectorvein-0.2.25 → vectorvein-0.2.26}/pyproject.toml +1 -1
  3. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/anthropic_client.py +62 -48
  4. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/graph/edge.py +6 -6
  5. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/graph/workflow.py +8 -1
  6. {vectorvein-0.2.25 → vectorvein-0.2.26}/README.md +0 -0
  7. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/__init__.py +0 -0
  8. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/api/__init__.py +0 -0
  9. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/api/client.py +0 -0
  10. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/api/exceptions.py +0 -0
  11. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/api/models.py +0 -0
  12. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/__init__.py +0 -0
  13. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
  14. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/base_client.py +0 -0
  15. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
  16. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/ernie_client.py +0 -0
  17. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/gemini_client.py +0 -0
  18. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/groq_client.py +0 -0
  19. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/local_client.py +0 -0
  20. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/minimax_client.py +0 -0
  21. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/mistral_client.py +0 -0
  22. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
  23. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/openai_client.py +0 -0
  24. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
  25. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/py.typed +0 -0
  26. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/qwen_client.py +0 -0
  27. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
  28. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/utils.py +0 -0
  29. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/xai_client.py +0 -0
  30. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/yi_client.py +0 -0
  31. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
  32. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/py.typed +0 -0
  33. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/server/token_server.py +0 -0
  34. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/settings/__init__.py +0 -0
  35. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/settings/py.typed +0 -0
  36. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/types/__init__.py +0 -0
  37. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/types/defaults.py +0 -0
  38. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/types/enums.py +0 -0
  39. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/types/exception.py +0 -0
  40. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/types/llm_parameters.py +0 -0
  41. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/types/py.typed +0 -0
  42. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/types/settings.py +0 -0
  43. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/utilities/media_processing.py +0 -0
  44. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/utilities/rate_limiter.py +0 -0
  45. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/utilities/retry.py +0 -0
  46. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/graph/node.py +0 -0
  47. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/graph/port.py +0 -0
  48. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/__init__.py +0 -0
  49. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
  50. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
  51. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
  52. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
  53. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/llms.py +0 -0
  54. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
  55. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
  56. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/output.py +0 -0
  57. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
  58. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
  59. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/tools.py +0 -0
  60. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/triggers.py +0 -0
  61. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
  62. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
  63. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
  64. {vectorvein-0.2.25 → vectorvein-0.2.26}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
--- vectorvein-0.2.25/PKG-INFO
+++ vectorvein-0.2.26/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vectorvein
-Version: 0.2.25
+Version: 0.2.26
 Summary: VectorVein Python SDK
 Author-Email: Anderson <andersonby@163.com>
 License: MIT
--- vectorvein-0.2.25/pyproject.toml
+++ vectorvein-0.2.26/pyproject.toml
@@ -17,7 +17,7 @@ description = "VectorVein Python SDK"
 name = "vectorvein"
 readme = "README.md"
 requires-python = ">=3.10"
-version = "0.2.25"
+version = "0.2.26"
 
 [project.license]
 text = "MIT"
--- vectorvein-0.2.25/src/vectorvein/chat_clients/anthropic_client.py
+++ vectorvein-0.2.26/src/vectorvein/chat_clients/anthropic_client.py
@@ -35,6 +35,7 @@ from anthropic import (
     AsyncAnthropicBedrock,
 )
 from anthropic._types import NOT_GIVEN
+from anthropic._exceptions import APIStatusError as AnthropicAPIStatusError
 from anthropic.types import (
     TextBlock,
     ThinkingBlock,
@@ -52,6 +53,7 @@ from ..types import defaults as defs
 from .utils import cutoff_messages, get_message_token_counts
 from .base_client import BaseChatClient, BaseAsyncChatClient
 from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient
+from ..types.exception import APIStatusError
 from ..types.enums import ContextLengthControlType, BackendType
 from ..types.llm_parameters import (
     Usage,
@@ -601,18 +603,21 @@ class AnthropicChatClient(BaseChatClient):
         self._acquire_rate_limit(self.endpoint, self.model, messages)
 
         if self.stream:
-            stream_response = raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=True,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                stream_response = raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=True,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)
 
             def generator():
                 result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
@@ -675,18 +680,21 @@ class AnthropicChatClient(BaseChatClient):
 
             return generator()
         else:
-            response = raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=False,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                response = raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=False,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)
 
             result = {
                 "content": "",
@@ -1140,18 +1148,21 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         await self._acquire_rate_limit(self.endpoint, self.model, messages)
 
         if self.stream:
-            stream_response = await raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=True,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                stream_response = await raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=True,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)
 
             async def generator():
                 result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
@@ -1214,18 +1225,21 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
 
             return generator()
         else:
-            response = await raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=False,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                response = await raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=False,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)
 
             result = {
                 "content": "",
--- vectorvein-0.2.25/src/vectorvein/workflow/graph/edge.py
+++ vectorvein-0.2.26/src/vectorvein/workflow/graph/edge.py
@@ -6,17 +6,17 @@ class Edge:
         self,
         id: str,
         source: str,
-        sourceHandle: str,
+        source_handle: str,
         target: str,
-        targetHandle: str,
+        target_handle: str,
         animated: bool = True,
         type: str = "default",
     ) -> None:
         self.id: str = id
         self.source: str = source
-        self.sourceHandle: str = sourceHandle
+        self.source_handle: str = source_handle
         self.target: str = target
-        self.targetHandle: str = targetHandle
+        self.target_handle: str = target_handle
         self.animated: bool = animated
         self.type: str = type
         self.style: Dict[str, Union[str, int]] = {"stroke": "#28c5e5", "strokeWidth": 3}
@@ -25,9 +25,9 @@ class Edge:
         return {
             "id": self.id,
             "source": self.source,
-            "sourceHandle": self.sourceHandle,
+            "sourceHandle": self.source_handle,
             "target": self.target,
-            "targetHandle": self.targetHandle,
+            "targetHandle": self.target_handle,
             "animated": self.animated,
             "type": self.type,
             "style": self.style,
--- vectorvein-0.2.25/src/vectorvein/workflow/graph/workflow.py
+++ vectorvein-0.2.26/src/vectorvein/workflow/graph/workflow.py
@@ -65,6 +65,13 @@ class Workflow:
         if not target_node_obj.has_input_port(target_port):
             raise ValueError(f"Target node {target_node_id} has no input port: {target_port}")
 
+        # Check whether the target port already has an incoming connection
+        for edge in self.edges:
+            if edge.target == target_node_id and edge.target_handle == target_port:
+                raise ValueError(
+                    f"Input port {target_port} of target node {target_node_id} is already connected: {edge.source}({edge.source_handle}) → {edge.target}({edge.target_handle})"
+                )
+
         # Create and add the edge
         edge_id = f"vueflow__edge-{source_node_id}{source_port}-{target_node_id}{target_port}"
         edge = Edge(edge_id, source_node_id, source_port, target_node_id, target_port)
@@ -112,7 +119,7 @@ class Workflow:
         for edge in self.edges:
             source_label = node_id_to_label[edge.source]
             target_label = node_id_to_label[edge.target]
-            label = f"{edge.sourceHandle} → {edge.targetHandle}"
+            label = f"{edge.source_handle} → {edge.target_handle}"
             lines.append(f"    {source_label} -->|{label}| {target_label}")
 
         return "\n".join(lines)