vectorvein 0.2.24__tar.gz → 0.2.26__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vectorvein-0.2.24 → vectorvein-0.2.26}/PKG-INFO +1 -1
- {vectorvein-0.2.24 → vectorvein-0.2.26}/pyproject.toml +1 -1
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/anthropic_client.py +62 -48
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/graph/edge.py +6 -6
- vectorvein-0.2.26/src/vectorvein/workflow/graph/workflow.py +224 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/llms.py +97 -8
- vectorvein-0.2.24/src/vectorvein/workflow/graph/workflow.py +0 -111
- {vectorvein-0.2.24 → vectorvein-0.2.26}/README.md +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/api/__init__.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/api/client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/api/exceptions.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/api/models.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/base_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/ernie_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/openai_compatible_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/utils.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/xai_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/settings/__init__.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/types/__init__.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/types/defaults.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/types/settings.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/utilities/rate_limiter.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/utilities/retry.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/graph/node.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/graph/port.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/__init__.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/media_editing.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/output.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/tools.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/triggers.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
- {vectorvein-0.2.24 → vectorvein-0.2.26}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
src/vectorvein/chat_clients/anthropic_client.py

@@ -35,6 +35,7 @@ from anthropic import (
     AsyncAnthropicBedrock,
 )
 from anthropic._types import NOT_GIVEN
+from anthropic._exceptions import APIStatusError as AnthropicAPIStatusError
 from anthropic.types import (
     TextBlock,
     ThinkingBlock,
@@ -52,6 +53,7 @@ from ..types import defaults as defs
 from .utils import cutoff_messages, get_message_token_counts
 from .base_client import BaseChatClient, BaseAsyncChatClient
 from .openai_compatible_client import OpenAICompatibleChatClient, AsyncOpenAICompatibleChatClient
+from ..types.exception import APIStatusError
 from ..types.enums import ContextLengthControlType, BackendType
 from ..types.llm_parameters import (
     Usage,
@@ -601,18 +603,21 @@ class AnthropicChatClient(BaseChatClient):
         self._acquire_rate_limit(self.endpoint, self.model, messages)

         if self.stream:
-            stream_response = raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=True,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                stream_response = raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=True,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)

         def generator():
             result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
@@ -675,18 +680,21 @@ class AnthropicChatClient(BaseChatClient):

             return generator()
         else:
-            response = raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=False,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                response = raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=False,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)

             result = {
                 "content": "",
@@ -1140,18 +1148,21 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):
         await self._acquire_rate_limit(self.endpoint, self.model, messages)

         if self.stream:
-            stream_response = await raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=True,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                stream_response = await raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=True,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)

         async def generator():
             result = {"content": "", "reasoning_content": "", "usage": {}, "tool_calls": []}
@@ -1214,18 +1225,21 @@ class AsyncAnthropicChatClient(BaseAsyncChatClient):

             return generator()
         else:
-            response = await raw_client.messages.create(
-                model=self.model_id,
-                messages=messages,
-                system=system_prompt,
-                stream=False,
-                temperature=self.temperature,
-                max_tokens=max_tokens,
-                tools=tools_params,
-                tool_choice=tool_choice_param,
-                top_p=top_p,
-                thinking=thinking,
-            )
+            try:
+                response = await raw_client.messages.create(
+                    model=self.model_id,
+                    messages=messages,
+                    system=system_prompt,
+                    stream=False,
+                    temperature=self.temperature,
+                    max_tokens=max_tokens,
+                    tools=tools_params,
+                    tool_choice=tool_choice_param,
+                    top_p=top_p,
+                    thinking=thinking,
+                )
+            except AnthropicAPIStatusError as e:
+                raise APIStatusError(message=e.message, response=e.response, body=e.body)

             result = {
                 "content": "",
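The four hunks above all apply the same pattern: the existing `messages.create(...)` calls are wrapped so that the Anthropic SDK's status error is re-raised as vectorvein's own `APIStatusError`, giving callers a single, backend-agnostic exception type. Below is a minimal caller-side sketch of what this enables; the `create_completion` method name and the attribute access on the exception are illustrative assumptions, not taken from this diff.

```python
from vectorvein.types.exception import APIStatusError


def safe_completion(client, messages):
    try:
        # Hypothetical high-level call on a chat client instance.
        return client.create_completion(messages=messages)
    except APIStatusError as e:
        # Assumes the exception keeps the message/response/body it was
        # constructed with in the hunks above.
        print(f"Upstream API returned an error status: {e.message}")
        return None
```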
src/vectorvein/workflow/graph/edge.py

@@ -6,17 +6,17 @@ class Edge:
         self,
         id: str,
         source: str,
-        sourceHandle: str,
+        source_handle: str,
         target: str,
-        targetHandle: str,
+        target_handle: str,
         animated: bool = True,
         type: str = "default",
     ) -> None:
         self.id: str = id
         self.source: str = source
-        self.sourceHandle: str = sourceHandle
+        self.source_handle: str = source_handle
         self.target: str = target
-        self.targetHandle: str = targetHandle
+        self.target_handle: str = target_handle
         self.animated: bool = animated
         self.type: str = type
         self.style: Dict[str, Union[str, int]] = {"stroke": "#28c5e5", "strokeWidth": 3}
@@ -25,9 +25,9 @@ class Edge:
         return {
             "id": self.id,
             "source": self.source,
-            "sourceHandle": self.sourceHandle,
+            "sourceHandle": self.source_handle,
             "target": self.target,
-            "targetHandle": self.targetHandle,
+            "targetHandle": self.target_handle,
             "animated": self.animated,
             "type": self.type,
             "style": self.style,
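Only the Python attribute names change here; `to_dict()` still emits the camelCase `sourceHandle`/`targetHandle` keys expected by the front-end graph format. A small illustrative sketch, assuming only the `Edge` class shown above:

```python
from vectorvein.workflow.graph.edge import Edge

edge = Edge(
    id="edge-1",
    source="node_a",
    source_handle="output",   # attribute was sourceHandle before 0.2.26
    target="node_b",
    target_handle="input",    # attribute was targetHandle before 0.2.26
)

# The serialized keys keep the original camelCase spelling.
assert edge.to_dict()["sourceHandle"] == "output"
assert edge.to_dict()["targetHandle"] == "input"
```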
src/vectorvein/workflow/graph/workflow.py (new file in 0.2.26; Chinese comments and docstrings translated here for readability, error-message literals left as released)

@@ -0,0 +1,224 @@
+import json
+from typing import List, Union, TypedDict
+
+from .node import Node
+from .edge import Edge
+
+
+class WorkflowCheckResult(TypedDict):
+    """Result type for workflow checks."""
+
+    no_cycle: bool  # True if the workflow contains no cycles
+    no_isolated_nodes: bool  # True if the workflow contains no isolated nodes
+
+
+class Workflow:
+    def __init__(self) -> None:
+        self.nodes: List[Node] = []
+        self.edges: List[Edge] = []
+
+    def add_node(self, node: Node):
+        self.nodes.append(node)
+
+    def add_nodes(self, nodes: List[Node]):
+        self.nodes.extend(nodes)
+
+    def add_edge(self, edge: Edge):
+        self.edges.append(edge)
+
+    def connect(
+        self,
+        source_node: Union[str, Node],
+        source_port: str,
+        target_node: Union[str, Node],
+        target_port: str,
+    ):
+        # Resolve the source node ID
+        if isinstance(source_node, Node):
+            source_node_id = source_node.id
+        else:
+            source_node_id = source_node
+
+        # Resolve the target node ID
+        if isinstance(target_node, Node):
+            target_node_id = target_node.id
+        else:
+            target_node_id = target_node
+
+        # Check that the source node exists
+        source_node_exists = any(node.id == source_node_id for node in self.nodes)
+        if not source_node_exists:
+            raise ValueError(f"源节点不存在: {source_node_id}")
+
+        # Check that the target node exists
+        target_node_exists = any(node.id == target_node_id for node in self.nodes)
+        if not target_node_exists:
+            raise ValueError(f"目标节点不存在: {target_node_id}")
+
+        # Check that the source node has the given output port
+        source_node_obj = next(node for node in self.nodes if node.id == source_node_id)
+        if not source_node_obj.has_output_port(source_port):
+            raise ValueError(f"源节点 {source_node_id} 不存在输出端口: {source_port}")
+
+        # Check that the target node has the given input port
+        target_node_obj = next(node for node in self.nodes if node.id == target_node_id)
+        if not target_node_obj.has_input_port(target_port):
+            raise ValueError(f"目标节点 {target_node_id} 不存在输入端口: {target_port}")
+
+        # Check whether the target port is already connected
+        for edge in self.edges:
+            if edge.target == target_node_id and edge.target_handle == target_port:
+                raise ValueError(
+                    f"目标节点 {target_node_id} 的输入端口 {target_port} 已经被连接: {edge.source}({edge.source_handle}) → {edge.target}({edge.target_handle})"
+                )
+
+        # Create and add the edge
+        edge_id = f"vueflow__edge-{source_node_id}{source_port}-{target_node_id}{target_port}"
+        edge = Edge(edge_id, source_node_id, source_port, target_node_id, target_port)
+        self.add_edge(edge)
+
+    def to_dict(self):
+        return {
+            "nodes": [node.to_dict() for node in self.nodes],
+            "edges": [edge.to_dict() for edge in self.edges],
+            "viewport": {"x": 0, "y": 0, "zoom": 1},
+        }
+
+    def to_json(self, ensure_ascii=False):
+        return json.dumps(self.to_dict(), ensure_ascii=ensure_ascii)
+
+    def to_mermaid(self) -> str:
+        """Generate a Mermaid flowchart for the workflow.
+
+        Returns:
+            str: The flowchart in Mermaid syntax.
+        """
+        lines = ["flowchart TD"]
+
+        # Map each node type to a running index
+        type_counters = {}
+        node_id_to_label = {}
+
+        # First generate a label for every node
+        for node in self.nodes:
+            node_type = node.type.lower()
+            if node_type not in type_counters:
+                type_counters[node_type] = 0
+            node_label = f"{node_type}_{type_counters[node_type]}"
+            node_id_to_label[node.id] = node_label
+            type_counters[node_type] += 1
+
+        # Add the node definitions
+        for node in self.nodes:
+            node_label = node_id_to_label[node.id]
+            lines.append(f'    {node_label}["{node_label} ({node.type})"]')
+
+        lines.append("")  # Blank line separating node and edge definitions
+
+        # Add the edge definitions
+        for edge in self.edges:
+            source_label = node_id_to_label[edge.source]
+            target_label = node_id_to_label[edge.target]
+            label = f"{edge.source_handle} → {edge.target_handle}"
+            lines.append(f"    {source_label} -->|{label}| {target_label}")
+
+        return "\n".join(lines)
+
+    def _check_dag(self) -> WorkflowCheckResult:
+        """Check that the graph is a directed acyclic graph and detect isolated nodes.
+
+        Returns:
+            WorkflowCheckResult: Dictionary with the check results.
+            - no_cycle (bool): True if the graph has no cycles, otherwise False.
+            - no_isolated_nodes (bool): True if there are no isolated nodes, otherwise False.
+        """
+        result: WorkflowCheckResult = {"no_cycle": True, "no_isolated_nodes": True}
+
+        # Filter out trigger nodes and assisted nodes
+        trigger_nodes = [
+            node.id
+            for node in self.nodes
+            if hasattr(node, "category") and (node.category == "triggers" or node.category == "assistedNodes")
+        ]
+
+        # Nodes and edges that actually need to be checked
+        regular_nodes = [node.id for node in self.nodes if node.id not in trigger_nodes]
+        regular_edges = [
+            edge for edge in self.edges if edge.source not in trigger_nodes and edge.target not in trigger_nodes
+        ]
+
+        # ---------- Cycle detection in the directed graph ----------
+        # Build the adjacency list
+        adjacency = {node_id: [] for node_id in regular_nodes}
+        for edge in regular_edges:
+            if edge.source in adjacency:  # Make sure the node is in the dict
+                adjacency[edge.source].append(edge.target)
+
+        # Three states: 0 = unvisited, 1 = being visited, 2 = fully visited
+        visited = {node_id: 0 for node_id in regular_nodes}
+
+        def dfs_cycle_detection(node_id):
+            # A node that is currently being visited means a cycle was found
+            if visited[node_id] == 1:
+                return False
+
+            # A fully visited node does not need to be visited again
+            if visited[node_id] == 2:
+                return True
+
+            # Mark as being visited
+            visited[node_id] = 1
+
+            # Visit all neighbours
+            for neighbor in adjacency[node_id]:
+                if neighbor in visited and not dfs_cycle_detection(neighbor):
+                    return False
+
+            # Mark as fully visited
+            visited[node_id] = 2
+            return True
+
+        # Run DFS cycle detection from every unvisited node
+        for node_id in regular_nodes:
+            if visited[node_id] == 0:
+                if not dfs_cycle_detection(node_id):
+                    result["no_cycle"] = False
+                    break
+
+        # ---------- Isolated-node detection ----------
+        # Build the undirected adjacency list
+        undirected_adjacency = {node_id: [] for node_id in regular_nodes}
+        for edge in regular_edges:
+            if edge.source in undirected_adjacency and edge.target in undirected_adjacency:
+                undirected_adjacency[edge.source].append(edge.target)
+                undirected_adjacency[edge.target].append(edge.source)
+
+        # Depth-first search over connected components
+        undirected_visited = set()
+
+        def dfs_connected_components(node_id):
+            undirected_visited.add(node_id)
+            for neighbor in undirected_adjacency[node_id]:
+                if neighbor not in undirected_visited:
+                    dfs_connected_components(neighbor)
+
+        # Count the connected components
+        connected_components_count = 0
+        for node_id in regular_nodes:
+            if node_id not in undirected_visited:
+                connected_components_count += 1
+                dfs_connected_components(node_id)
+
+        # More than one connected component means there are isolated nodes
+        if connected_components_count > 1 and len(regular_nodes) > 0:
+            result["no_isolated_nodes"] = False
+
+        return result
+
+    def check(self) -> WorkflowCheckResult:
+        """Check the validity of the workflow graph.
+
+        Returns:
+            WorkflowCheckResult: Dictionary with the individual check results.
+        """
+        return self._check_dag()
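The new `Workflow` implementation above keeps the original graph-building API (`add_node`, `connect`, `to_dict`, `to_json`, `to_mermaid`) and adds `check()`, which runs the DFS-based cycle check plus a connected-components pass over the non-trigger nodes. A hypothetical usage sketch follows; the specific node classes and port names are assumptions chosen for illustration, and `connect()` raises `ValueError` if a node or port does not exist.

```python
from vectorvein.workflow.graph.workflow import Workflow
from vectorvein.workflow.nodes.llms import OpenAI, Claude  # port names below are assumed

workflow = Workflow()
gpt = OpenAI()
claude = Claude()
workflow.add_nodes([gpt, claude])

# connect() validates both nodes and ports and refuses to attach a second
# edge to an input port that is already connected.
workflow.connect(gpt, "output", claude, "prompt")

print(workflow.check())       # e.g. {"no_cycle": True, "no_isolated_nodes": True}
print(workflow.to_mermaid())  # flowchart TD ...
print(workflow.to_json())
```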
src/vectorvein/workflow/nodes/llms.py

@@ -292,6 +292,8 @@ class Claude(Node):
                     port_type=PortType.SELECT,
                     value="claude-3-5-haiku",
                     options=[
+                        {"value": "claude-3-7-sonnet-thinking", "label": "claude-3-7-sonnet-thinking"},
+                        {"value": "claude-3-7-sonnet", "label": "claude-3-7-sonnet"},
                         {"value": "claude-3-5-sonnet", "label": "claude-3-5-sonnet"},
                         {"value": "claude-3-5-haiku", "label": "claude-3-5-haiku"},
                         {"value": "claude-3-opus", "label": "claude-3-opus"},
@@ -338,8 +340,8 @@ class Deepseek(Node):
                     value="deepseek-chat",
                     options=[
                         {"value": "deepseek-chat", "label": "deepseek-chat"},
-                        {"value": "deepseek-reasoner", "label": "deepseek-
-                        {"value": "deepseek-
+                        {"value": "deepseek-reasoner", "label": "deepseek-r1"},
+                        {"value": "deepseek-r1-distill-qwen-32b", "label": "deepseek-r1-distill-qwen-32b"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -523,12 +525,10 @@ class LingYiWanWu(Node):
                     port_type=PortType.SELECT,
                     value="yi-lightning",
                     options=[
-                        {
-
-
-
-                        {"value": "yi-medium-200k", "label": "yi-medium-200k"},
-                        {"value": "yi-spark", "label": "yi-spark"},
+                        {
+                            "value": "yi-lightning",
+                            "label": "yi-lightning",
+                        },
                     ],
                 ),
                 "temperature": InputPort(
@@ -746,6 +746,7 @@ class OpenAI(Node):
                         {"value": "gpt-4o-mini", "label": "gpt-4o-mini"},
                         {"value": "o1-mini", "label": "o1-mini"},
                         {"value": "o1-preview", "label": "o1-preview"},
+                        {"value": "o3-mini", "label": "o3-mini"},
                     ],
                 ),
                 "temperature": InputPort(
@@ -893,3 +894,91 @@ class XAi(Node):
             ),
         },
     )
+
+
+class CustomModel(Node):
+    def __init__(self, id: Optional[str] = None):
+        super().__init__(
+            node_type="CustomModel",
+            category="llms",
+            task_name="llms.custom_model",
+            node_id=id,
+            ports={
+                "prompt": InputPort(
+                    name="prompt",
+                    port_type=PortType.TEXTAREA,
+                    value="",
+                ),
+                "model_family": InputPort(
+                    name="model_family",
+                    port_type=PortType.SELECT,
+                    value="",
+                    options=[],
+                ),
+                "llm_model": InputPort(
+                    name="llm_model",
+                    port_type=PortType.SELECT,
+                    value="",
+                    options=[],
+                ),
+                "temperature": InputPort(
+                    name="temperature",
+                    port_type=PortType.TEMPERATURE,
+                    value=0.7,
+                ),
+                "top_p": InputPort(
+                    name="top_p",
+                    port_type=PortType.NUMBER,
+                    value=0.95,
+                ),
+                "stream": InputPort(
+                    name="stream",
+                    port_type=PortType.CHECKBOX,
+                    value=False,
+                ),
+                "system_prompt": InputPort(
+                    name="system_prompt",
+                    port_type=PortType.TEXTAREA,
+                    value="",
+                ),
+                "response_format": InputPort(
+                    name="response_format",
+                    port_type=PortType.SELECT,
+                    value="text",
+                    options=[
+                        {"value": "text", "label": "Text"},
+                        {"value": "json_object", "label": "JSON"},
+                    ],
+                ),
+                "use_function_call": InputPort(
+                    name="use_function_call",
+                    port_type=PortType.CHECKBOX,
+                    value=False,
+                ),
+                "functions": InputPort(
+                    name="functions",
+                    port_type=PortType.SELECT,
+                    value=[],
+                ),
+                "function_call_mode": InputPort(
+                    name="function_call_mode",
+                    port_type=PortType.SELECT,
+                    value="auto",
+                    options=[
+                        {"value": "auto", "label": "auto"},
+                        {"value": "none", "label": "none"},
+                    ],
+                ),
+                "output": OutputPort(
+                    name="output",
+                ),
+                "function_call_output": OutputPort(
+                    name="function_call_output",
+                    condition="return fieldsData.use_function_call.value",
+                ),
+                "function_call_arguments": OutputPort(
+                    name="function_call_arguments",
+                    condition="return fieldsData.use_function_call.value",
+                ),
+            },
+        )
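The new `CustomModel` node defines the standard LLM ports (`prompt`, `system_prompt`, sampling parameters, function-call ports) plus `model_family`/`llm_model` selectors whose option lists are left empty to be filled at configuration time. A hypothetical sketch of wiring it into a workflow; only the port names come from the definition above, everything else is illustrative.

```python
from vectorvein.workflow.graph.workflow import Workflow
from vectorvein.workflow.nodes.llms import CustomModel

workflow = Workflow()
extractor = CustomModel()
summarizer = CustomModel()
workflow.add_nodes([extractor, summarizer])

# Feed the first model's "output" port into the second model's "prompt" port.
workflow.connect(extractor, "output", summarizer, "prompt")
print(workflow.to_json())
```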
src/vectorvein/workflow/graph/workflow.py (0.2.24 version, removed and replaced by the new file above; comments translated for readability)

@@ -1,111 +0,0 @@
-import json
-from typing import List, Union
-
-from .node import Node
-from .edge import Edge
-
-
-class Workflow:
-    def __init__(self) -> None:
-        self.nodes: List[Node] = []
-        self.edges: List[Edge] = []
-
-    def add_node(self, node: Node):
-        self.nodes.append(node)
-
-    def add_nodes(self, nodes: List[Node]):
-        self.nodes.extend(nodes)
-
-    def add_edge(self, edge: Edge):
-        self.edges.append(edge)
-
-    def connect(
-        self,
-        source_node: Union[str, Node],
-        source_port: str,
-        target_node: Union[str, Node],
-        target_port: str,
-    ):
-        # Resolve the source node ID
-        if isinstance(source_node, Node):
-            source_node_id = source_node.id
-        else:
-            source_node_id = source_node
-
-        # Resolve the target node ID
-        if isinstance(target_node, Node):
-            target_node_id = target_node.id
-        else:
-            target_node_id = target_node
-
-        # Check that the source node exists
-        source_node_exists = any(node.id == source_node_id for node in self.nodes)
-        if not source_node_exists:
-            raise ValueError(f"源节点不存在: {source_node_id}")
-
-        # Check that the target node exists
-        target_node_exists = any(node.id == target_node_id for node in self.nodes)
-        if not target_node_exists:
-            raise ValueError(f"目标节点不存在: {target_node_id}")
-
-        # Check that the source node has the given output port
-        source_node_obj = next(node for node in self.nodes if node.id == source_node_id)
-        if not source_node_obj.has_output_port(source_port):
-            raise ValueError(f"源节点 {source_node_id} 不存在输出端口: {source_port}")
-
-        # Check that the target node has the given input port
-        target_node_obj = next(node for node in self.nodes if node.id == target_node_id)
-        if not target_node_obj.has_input_port(target_port):
-            raise ValueError(f"目标节点 {target_node_id} 不存在输入端口: {target_port}")
-
-        # Create and add the edge
-        edge_id = f"vueflow__edge-{source_node_id}{source_port}-{target_node_id}{target_port}"
-        edge = Edge(edge_id, source_node_id, source_port, target_node_id, target_port)
-        self.add_edge(edge)
-
-    def to_dict(self):
-        return {
-            "nodes": [node.to_dict() for node in self.nodes],
-            "edges": [edge.to_dict() for edge in self.edges],
-            "viewport": {"x": 0, "y": 0, "zoom": 1},
-        }
-
-    def to_json(self, ensure_ascii=False):
-        return json.dumps(self.to_dict(), ensure_ascii=ensure_ascii)
-
-    def to_mermaid(self) -> str:
-        """Generate a Mermaid flowchart for the workflow.
-
-        Returns:
-            str: The flowchart in Mermaid syntax.
-        """
-        lines = ["flowchart TD"]
-
-        # Map each node type to a running index
-        type_counters = {}
-        node_id_to_label = {}
-
-        # First generate a label for every node
-        for node in self.nodes:
-            node_type = node.type.lower()
-            if node_type not in type_counters:
-                type_counters[node_type] = 0
-            node_label = f"{node_type}_{type_counters[node_type]}"
-            node_id_to_label[node.id] = node_label
-            type_counters[node_type] += 1
-
-        # Add the node definitions
-        for node in self.nodes:
-            node_label = node_id_to_label[node.id]
-            lines.append(f'    {node_label}["{node_label} ({node.type})"]')
-
-        lines.append("")  # Blank line separating node and edge definitions
-
-        # Add the edge definitions
-        for edge in self.edges:
-            source_label = node_id_to_label[edge.source]
-            target_label = node_id_to_label[edge.target]
-            label = f"{edge.sourceHandle} → {edge.targetHandle}"
-            lines.append(f"    {source_label} -->|{label}| {target_label}")
-
-        return "\n".join(lines)