agstack 1.3.0__tar.gz → 1.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agstack-1.3.0 → agstack-1.4.0}/PKG-INFO +2 -1
- agstack-1.4.0/agstack/llm/flow/flow.py +486 -0
- agstack-1.4.0/agstack/llm/flow/sandbox.py +59 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack.egg-info/PKG-INFO +2 -1
- {agstack-1.3.0 → agstack-1.4.0}/agstack.egg-info/SOURCES.txt +1 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack.egg-info/requires.txt +1 -0
- {agstack-1.3.0 → agstack-1.4.0}/pyproject.toml +2 -1
- agstack-1.3.0/agstack/llm/flow/flow.py +0 -272
- {agstack-1.3.0 → agstack-1.4.0}/LICENSE +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/README.md +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/config/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/config/logger.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/config/manager.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/config/types.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/contexts.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/decorators.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/events.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/exceptions.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/fastapi/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/fastapi/exception.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/fastapi/middleware.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/fastapi/offline.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/fastapi/sse.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/infra/db/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/infra/es/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/infra/kg/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/infra/mq/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/client.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/agent.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/context.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/event.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/exceptions.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/factory.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/loader.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/records.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/registry.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/state.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/flow/tool.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/prompts.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/llm/token.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/registry.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/schema.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/security/__init__.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/security/casbin.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/security/crypt.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack/status.py +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack.egg-info/dependency_links.txt +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/agstack.egg-info/top_level.txt +0 -0
- {agstack-1.3.0 → agstack-1.4.0}/setup.cfg +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: agstack
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.4.0
|
|
4
4
|
Summary: Production-ready toolkit for building FastAPI and LLM applications
|
|
5
5
|
Author-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
6
6
|
Maintainer-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
@@ -33,6 +33,7 @@ Requires-Dist: pycasbin>=2.8.0
|
|
|
33
33
|
Requires-Dist: pydantic>=2.12.4
|
|
34
34
|
Requires-Dist: python-multipart>=0.0.20
|
|
35
35
|
Requires-Dist: requests>=2.32.5
|
|
36
|
+
Requires-Dist: RestrictedPython>=7.0
|
|
36
37
|
Requires-Dist: sqlalchemy[asyncio]>=2.0.48
|
|
37
38
|
Requires-Dist: sqlobjects>=1.3.0
|
|
38
39
|
Requires-Dist: tiktoken>=0.12.0
|
|
@@ -0,0 +1,486 @@
|
|
|
1
|
+
# Copyright (c) 2020-2025 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""Flow 定义和执行"""
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import json as _json
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import TYPE_CHECKING, Any, AsyncIterator
|
|
9
|
+
from uuid import uuid4
|
|
10
|
+
|
|
11
|
+
from . import event
|
|
12
|
+
from .exceptions import FlowError
|
|
13
|
+
from .registry import registry
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
if TYPE_CHECKING:
|
|
17
|
+
from .context import FlowContext
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class _SafeFormatDict(dict):
|
|
21
|
+
"""安全的模板变量替换,缺失 key 时保留原始占位符"""
|
|
22
|
+
|
|
23
|
+
def __missing__(self, key: str) -> str:
|
|
24
|
+
return f"{{{key}}}"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
|
|
28
|
+
class Flow:
|
|
29
|
+
"""Flow 配置定义"""
|
|
30
|
+
|
|
31
|
+
flow_id: str
|
|
32
|
+
name: str
|
|
33
|
+
description: str = ""
|
|
34
|
+
nodes: list[dict[str, Any]] = field(default_factory=list)
|
|
35
|
+
edges: list[dict[str, Any]] = field(default_factory=list)
|
|
36
|
+
variables: dict[str, Any] = field(default_factory=dict)
|
|
37
|
+
|
|
38
|
+
# ── 边驱动路由 ──
|
|
39
|
+
|
|
40
|
+
def _resolve_next_node(self, current_id: str, result: str | None = None) -> str | None:
|
|
41
|
+
"""根据当前节点和执行结果,通过 edges 查找下一节点"""
|
|
42
|
+
for edge in self.edges:
|
|
43
|
+
if edge.get("source") == current_id:
|
|
44
|
+
cond = edge.get("condition")
|
|
45
|
+
if cond is None or cond == result:
|
|
46
|
+
return edge.get("target")
|
|
47
|
+
return None
|
|
48
|
+
|
|
49
|
+
# ── condition 节点 ──
|
|
50
|
+
|
|
51
|
+
async def _evaluate_condition(self, node: dict, context: "FlowContext") -> str:
|
|
52
|
+
"""调用 LLM 判断条件是否匹配"""
|
|
53
|
+
config = node.get("config", {})
|
|
54
|
+
topic = config.get("topic", "")
|
|
55
|
+
query = context.get_variable("query", "")
|
|
56
|
+
|
|
57
|
+
prompt = (
|
|
58
|
+
f"判断以下问题是否属于「{topic}」相关问题。\n"
|
|
59
|
+
f"问题:{query}\n"
|
|
60
|
+
f'仅回复 JSON:{{"result": "match"}} 或 {{"result": "reject"}}'
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
from ..client import get_llm_client
|
|
64
|
+
|
|
65
|
+
client = get_llm_client()
|
|
66
|
+
response = await client.chat(
|
|
67
|
+
messages=[{"role": "user", "content": prompt}],
|
|
68
|
+
model=config.get("model", "gpt-4o-mini"),
|
|
69
|
+
temperature=0,
|
|
70
|
+
)
|
|
71
|
+
text = response.choices[0].message.content or ""
|
|
72
|
+
try:
|
|
73
|
+
return _json.loads(text).get("result", "reject")
|
|
74
|
+
except Exception:
|
|
75
|
+
return "match" if "match" in text.lower() else "reject"
|
|
76
|
+
|
|
77
|
+
# ── message 节点 ──
|
|
78
|
+
|
|
79
|
+
async def _emit_message(self, node: dict, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
|
|
80
|
+
"""输出模板文本"""
|
|
81
|
+
config = node.get("config", {})
|
|
82
|
+
template = config.get("content", "")
|
|
83
|
+
text = template.format_map(_SafeFormatDict(context.variables))
|
|
84
|
+
msg_id = context.message_id or str(uuid4())
|
|
85
|
+
yield event.text_message_start(message_id=msg_id, role="assistant")
|
|
86
|
+
yield event.text_message_content(message_id=msg_id, delta=text)
|
|
87
|
+
yield event.text_message_end(message_id=msg_id)
|
|
88
|
+
|
|
89
|
+
# ── 执行入口 ──
|
|
90
|
+
|
|
91
|
+
async def run(self, context: "FlowContext") -> dict[str, Any]:
|
|
92
|
+
"""执行 Flow"""
|
|
93
|
+
if not self.edges:
|
|
94
|
+
# 向后兼容:无 edges 时按 nodes 列表顺序执行
|
|
95
|
+
for node in self.nodes:
|
|
96
|
+
node_id = node.get("id")
|
|
97
|
+
if not node_id:
|
|
98
|
+
continue
|
|
99
|
+
context.current_node = node_id
|
|
100
|
+
result = await self._execute_node(node, context)
|
|
101
|
+
context.set_node_result(node_id, result)
|
|
102
|
+
else:
|
|
103
|
+
# edge 驱动执行
|
|
104
|
+
current_node_id: str | None = self.nodes[0]["id"] if self.nodes else None
|
|
105
|
+
while current_node_id:
|
|
106
|
+
node = self.get_node_config(current_node_id)
|
|
107
|
+
if not node:
|
|
108
|
+
break
|
|
109
|
+
context.current_node = current_node_id
|
|
110
|
+
node_type = node.get("type")
|
|
111
|
+
|
|
112
|
+
if node_type == "condition":
|
|
113
|
+
result = await self._evaluate_condition(node, context)
|
|
114
|
+
context.set_node_result(current_node_id, result)
|
|
115
|
+
current_node_id = self._resolve_next_node(current_node_id, result)
|
|
116
|
+
elif node_type == "message":
|
|
117
|
+
config = node.get("config", {})
|
|
118
|
+
template = config.get("content", "")
|
|
119
|
+
text = template.format_map(_SafeFormatDict(context.variables))
|
|
120
|
+
context.set_node_result(current_node_id, text)
|
|
121
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
122
|
+
elif node_type in ("agent", "tool"):
|
|
123
|
+
result = await self._execute_node(node, context)
|
|
124
|
+
context.set_node_result(current_node_id, result)
|
|
125
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
126
|
+
|
|
127
|
+
elif node_type == "parallel":
|
|
128
|
+
config = node.get("config", {})
|
|
129
|
+
branches: list[str] = config.get("branches", [])
|
|
130
|
+
|
|
131
|
+
async def _run_branch(branch_id: str) -> None:
|
|
132
|
+
branch_node = self.get_node_config(branch_id)
|
|
133
|
+
if not branch_node:
|
|
134
|
+
return
|
|
135
|
+
context.current_node = branch_id
|
|
136
|
+
self._set_parameters(branch_node.get("config", {}), context)
|
|
137
|
+
result = await self._execute_node(branch_node, context)
|
|
138
|
+
context.set_node_result(branch_id, result)
|
|
139
|
+
|
|
140
|
+
await asyncio.gather(*[_run_branch(bid) for bid in branches])
|
|
141
|
+
context.set_node_result(current_node_id, "done")
|
|
142
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
143
|
+
|
|
144
|
+
elif node_type == "iteration":
|
|
145
|
+
config = node.get("config", {})
|
|
146
|
+
items_ref = config.get("items", "")
|
|
147
|
+
items = context.resolve_reference(items_ref) if isinstance(items_ref, str) else items_ref
|
|
148
|
+
if isinstance(items, str):
|
|
149
|
+
items = _json.loads(items)
|
|
150
|
+
if not isinstance(items, list):
|
|
151
|
+
items = [items]
|
|
152
|
+
|
|
153
|
+
item_var = config.get("item_variable", "item")
|
|
154
|
+
index_var = config.get("index_variable", "index")
|
|
155
|
+
body_nodes: list[str] = config.get("body", [])
|
|
156
|
+
output_var = config.get("output_variable", "iteration_results")
|
|
157
|
+
results: list[Any] = []
|
|
158
|
+
|
|
159
|
+
for idx, item in enumerate(items):
|
|
160
|
+
context.set_variable(item_var, item)
|
|
161
|
+
context.set_variable(index_var, idx)
|
|
162
|
+
for body_node_id in body_nodes:
|
|
163
|
+
body_node = self.get_node_config(body_node_id)
|
|
164
|
+
if not body_node:
|
|
165
|
+
continue
|
|
166
|
+
self._set_parameters(body_node.get("config", {}), context)
|
|
167
|
+
body_result = await self._execute_node(body_node, context)
|
|
168
|
+
context.set_node_result(body_node_id, body_result)
|
|
169
|
+
if body_nodes:
|
|
170
|
+
results.append(context.node_results.get(body_nodes[-1]))
|
|
171
|
+
|
|
172
|
+
context.set_variable(output_var, results)
|
|
173
|
+
context.set_node_result(current_node_id, _json.dumps(results, ensure_ascii=False))
|
|
174
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
175
|
+
|
|
176
|
+
elif node_type == "loop":
|
|
177
|
+
config = node.get("config", {})
|
|
178
|
+
body_nodes_l: list[str] = config.get("body", [])
|
|
179
|
+
condition_node_id = config.get("condition_node")
|
|
180
|
+
break_cond = config.get("break_condition", "done")
|
|
181
|
+
max_iter = config.get("max_iterations", 10)
|
|
182
|
+
loop_var = config.get("loop_variable", "loop_count")
|
|
183
|
+
|
|
184
|
+
for iteration in range(max_iter):
|
|
185
|
+
context.set_variable(loop_var, iteration)
|
|
186
|
+
for body_node_id in body_nodes_l:
|
|
187
|
+
body_node = self.get_node_config(body_node_id)
|
|
188
|
+
if not body_node:
|
|
189
|
+
continue
|
|
190
|
+
self._set_parameters(body_node.get("config", {}), context)
|
|
191
|
+
body_result = await self._execute_node(body_node, context)
|
|
192
|
+
context.set_node_result(body_node_id, body_result)
|
|
193
|
+
if condition_node_id:
|
|
194
|
+
cond_result = context.node_results.get(condition_node_id, "")
|
|
195
|
+
if isinstance(cond_result, str):
|
|
196
|
+
try:
|
|
197
|
+
parsed = _json.loads(cond_result)
|
|
198
|
+
if isinstance(parsed, dict) and parsed.get("result") == break_cond:
|
|
199
|
+
break
|
|
200
|
+
except (ValueError, TypeError):
|
|
201
|
+
if cond_result == break_cond:
|
|
202
|
+
break
|
|
203
|
+
|
|
204
|
+
context.set_node_result(current_node_id, "done")
|
|
205
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
206
|
+
|
|
207
|
+
elif node_type == "python":
|
|
208
|
+
config = node.get("config", {})
|
|
209
|
+
inputs_spec: dict[str, Any] = config.get("inputs", {})
|
|
210
|
+
resolved_inputs: dict[str, Any] = {}
|
|
211
|
+
for key, ref in inputs_spec.items():
|
|
212
|
+
resolved_inputs[key] = context.resolve_reference(ref) if isinstance(ref, str) else ref
|
|
213
|
+
|
|
214
|
+
from .sandbox import execute_python_node
|
|
215
|
+
|
|
216
|
+
code_str = config.get("code", "")
|
|
217
|
+
py_result = execute_python_node(code_str, resolved_inputs)
|
|
218
|
+
|
|
219
|
+
outputs_spec: dict[str, Any] = config.get("outputs", {})
|
|
220
|
+
for key in outputs_spec:
|
|
221
|
+
if key in py_result:
|
|
222
|
+
context.set_variable(key, py_result[key])
|
|
223
|
+
|
|
224
|
+
context.set_node_result(current_node_id, _json.dumps(py_result, ensure_ascii=False))
|
|
225
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
226
|
+
else:
|
|
227
|
+
break
|
|
228
|
+
|
|
229
|
+
return context.node_results
|
|
230
|
+
|
|
231
|
+
async def stream(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
|
|
232
|
+
"""流式执行 Flow(输出 AG-UI 标准事件)"""
|
|
233
|
+
yield event.step_started(step_name=f"flow:{self.name}")
|
|
234
|
+
|
|
235
|
+
if not self.edges:
|
|
236
|
+
# 向后兼容:无 edges 时按 nodes 列表顺序执行(原有逻辑)
|
|
237
|
+
async for evt in self._stream_sequential(context):
|
|
238
|
+
yield evt
|
|
239
|
+
else:
|
|
240
|
+
# edge 驱动执行
|
|
241
|
+
async for evt in self._stream_edge_driven(context):
|
|
242
|
+
yield evt
|
|
243
|
+
|
|
244
|
+
yield event.step_finished(step_name=f"flow:{self.name}")
|
|
245
|
+
|
|
246
|
+
async def _stream_sequential(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
|
|
247
|
+
"""顺序流式执行(原有逻辑)"""
|
|
248
|
+
for node in self.nodes:
|
|
249
|
+
node_id = node.get("id")
|
|
250
|
+
if not node_id:
|
|
251
|
+
continue
|
|
252
|
+
|
|
253
|
+
context.current_node = node_id
|
|
254
|
+
yield event.step_started(step_name=f"node:{node_id}")
|
|
255
|
+
|
|
256
|
+
if node.get("type") == "agent":
|
|
257
|
+
agent_name = node.get("config", {}).get("agent_name", "")
|
|
258
|
+
yield event.step_started(step_name=f"agent:{agent_name}")
|
|
259
|
+
self._set_parameters(node.get("config", {}), context)
|
|
260
|
+
ag = self._create_agent(node.get("config", {}))
|
|
261
|
+
async for evt in ag.stream(context):
|
|
262
|
+
yield evt
|
|
263
|
+
result = context.get_last_output(ag.name) or ""
|
|
264
|
+
context.set_node_result(node_id, result)
|
|
265
|
+
yield event.step_finished(step_name=f"agent:{agent_name}")
|
|
266
|
+
else:
|
|
267
|
+
tool_name = node.get("config", {}).get("tool_name", "")
|
|
268
|
+
yield event.step_started(step_name=f"tool:{tool_name}")
|
|
269
|
+
result = await self._execute_node(node, context)
|
|
270
|
+
context.set_node_result(node_id, result)
|
|
271
|
+
yield event.step_finished(step_name=f"tool:{tool_name}")
|
|
272
|
+
|
|
273
|
+
async def _stream_edge_driven(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
|
|
274
|
+
"""边驱动流式执行"""
|
|
275
|
+
current_node_id: str | None = self.nodes[0]["id"] if self.nodes else None
|
|
276
|
+
|
|
277
|
+
while current_node_id:
|
|
278
|
+
node = self.get_node_config(current_node_id)
|
|
279
|
+
if not node:
|
|
280
|
+
break
|
|
281
|
+
|
|
282
|
+
context.current_node = current_node_id
|
|
283
|
+
node_type = node.get("type")
|
|
284
|
+
|
|
285
|
+
if node_type == "condition":
|
|
286
|
+
result = await self._evaluate_condition(node, context)
|
|
287
|
+
context.set_node_result(current_node_id, result)
|
|
288
|
+
current_node_id = self._resolve_next_node(current_node_id, result)
|
|
289
|
+
|
|
290
|
+
elif node_type == "message":
|
|
291
|
+
async for evt in self._emit_message(node, context):
|
|
292
|
+
yield evt
|
|
293
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
294
|
+
|
|
295
|
+
elif node_type == "agent":
|
|
296
|
+
agent_name = node.get("config", {}).get("agent_name", "")
|
|
297
|
+
yield event.step_started(step_name=f"agent:{agent_name}")
|
|
298
|
+
self._set_parameters(node.get("config", {}), context)
|
|
299
|
+
ag = self._create_agent(node.get("config", {}))
|
|
300
|
+
async for evt in ag.stream(context):
|
|
301
|
+
yield evt
|
|
302
|
+
result = context.get_last_output(ag.name) or ""
|
|
303
|
+
context.set_node_result(current_node_id, result)
|
|
304
|
+
yield event.step_finished(step_name=f"agent:{agent_name}")
|
|
305
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
306
|
+
|
|
307
|
+
elif node_type == "tool":
|
|
308
|
+
tool_name = node.get("config", {}).get("tool_name", "")
|
|
309
|
+
yield event.step_started(step_name=f"tool:{tool_name}")
|
|
310
|
+
result = await self._execute_node(node, context)
|
|
311
|
+
context.set_node_result(current_node_id, result)
|
|
312
|
+
yield event.step_finished(step_name=f"tool:{tool_name}")
|
|
313
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
314
|
+
|
|
315
|
+
elif node_type == "parallel":
|
|
316
|
+
config = node.get("config", {})
|
|
317
|
+
branches = config.get("branches", [])
|
|
318
|
+
yield event.step_started(step_name=f"parallel:{current_node_id}")
|
|
319
|
+
|
|
320
|
+
async def _exec_branch(branch_id: str) -> None:
|
|
321
|
+
branch_node = self.get_node_config(branch_id)
|
|
322
|
+
if not branch_node:
|
|
323
|
+
return
|
|
324
|
+
context.current_node = branch_id
|
|
325
|
+
self._set_parameters(branch_node.get("config", {}), context)
|
|
326
|
+
result = await self._execute_node(branch_node, context)
|
|
327
|
+
context.set_node_result(branch_id, result)
|
|
328
|
+
|
|
329
|
+
await asyncio.gather(*[_exec_branch(bid) for bid in branches])
|
|
330
|
+
context.set_node_result(current_node_id, "done")
|
|
331
|
+
yield event.step_finished(step_name=f"parallel:{current_node_id}")
|
|
332
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
333
|
+
|
|
334
|
+
elif node_type == "iteration":
|
|
335
|
+
config = node.get("config", {})
|
|
336
|
+
items_ref = config.get("items", "")
|
|
337
|
+
items = context.resolve_reference(items_ref) if isinstance(items_ref, str) else items_ref
|
|
338
|
+
if isinstance(items, str):
|
|
339
|
+
items = _json.loads(items)
|
|
340
|
+
if not isinstance(items, list):
|
|
341
|
+
items = [items]
|
|
342
|
+
|
|
343
|
+
item_var = config.get("item_variable", "item")
|
|
344
|
+
index_var = config.get("index_variable", "index")
|
|
345
|
+
body_nodes: list[str] = config.get("body", [])
|
|
346
|
+
output_var = config.get("output_variable", "iteration_results")
|
|
347
|
+
results: list[Any] = []
|
|
348
|
+
|
|
349
|
+
yield event.step_started(step_name=f"iteration:{current_node_id}")
|
|
350
|
+
for idx, item in enumerate(items):
|
|
351
|
+
context.set_variable(item_var, item)
|
|
352
|
+
context.set_variable(index_var, idx)
|
|
353
|
+
for body_node_id in body_nodes:
|
|
354
|
+
body_node = self.get_node_config(body_node_id)
|
|
355
|
+
if not body_node:
|
|
356
|
+
continue
|
|
357
|
+
self._set_parameters(body_node.get("config", {}), context)
|
|
358
|
+
body_result = await self._execute_node(body_node, context)
|
|
359
|
+
context.set_node_result(body_node_id, body_result)
|
|
360
|
+
if body_nodes:
|
|
361
|
+
results.append(context.node_results.get(body_nodes[-1]))
|
|
362
|
+
|
|
363
|
+
context.set_variable(output_var, results)
|
|
364
|
+
context.set_node_result(current_node_id, _json.dumps(results, ensure_ascii=False))
|
|
365
|
+
yield event.step_finished(step_name=f"iteration:{current_node_id}")
|
|
366
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
367
|
+
|
|
368
|
+
elif node_type == "loop":
|
|
369
|
+
config = node.get("config", {})
|
|
370
|
+
body_nodes_l: list[str] = config.get("body", [])
|
|
371
|
+
condition_node_id = config.get("condition_node")
|
|
372
|
+
break_cond = config.get("break_condition", "done")
|
|
373
|
+
max_iter = config.get("max_iterations", 10)
|
|
374
|
+
loop_var = config.get("loop_variable", "loop_count")
|
|
375
|
+
|
|
376
|
+
yield event.step_started(step_name=f"loop:{current_node_id}")
|
|
377
|
+
for iteration in range(max_iter):
|
|
378
|
+
context.set_variable(loop_var, iteration)
|
|
379
|
+
for body_node_id in body_nodes_l:
|
|
380
|
+
body_node = self.get_node_config(body_node_id)
|
|
381
|
+
if not body_node:
|
|
382
|
+
continue
|
|
383
|
+
self._set_parameters(body_node.get("config", {}), context)
|
|
384
|
+
body_result = await self._execute_node(body_node, context)
|
|
385
|
+
context.set_node_result(body_node_id, body_result)
|
|
386
|
+
# 检查终止条件
|
|
387
|
+
if condition_node_id:
|
|
388
|
+
cond_result = context.node_results.get(condition_node_id, "")
|
|
389
|
+
if isinstance(cond_result, str):
|
|
390
|
+
try:
|
|
391
|
+
parsed = _json.loads(cond_result)
|
|
392
|
+
if isinstance(parsed, dict) and parsed.get("result") == break_cond:
|
|
393
|
+
break
|
|
394
|
+
except (ValueError, TypeError):
|
|
395
|
+
if cond_result == break_cond:
|
|
396
|
+
break
|
|
397
|
+
|
|
398
|
+
context.set_node_result(current_node_id, "done")
|
|
399
|
+
yield event.step_finished(step_name=f"loop:{current_node_id}")
|
|
400
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
401
|
+
|
|
402
|
+
elif node_type == "python":
|
|
403
|
+
config = node.get("config", {})
|
|
404
|
+
yield event.step_started(step_name=f"python:{current_node_id}")
|
|
405
|
+
|
|
406
|
+
# 解析 inputs
|
|
407
|
+
inputs_spec: dict[str, Any] = config.get("inputs", {})
|
|
408
|
+
resolved_inputs: dict[str, Any] = {}
|
|
409
|
+
for key, ref in inputs_spec.items():
|
|
410
|
+
resolved_inputs[key] = context.resolve_reference(ref) if isinstance(ref, str) else ref
|
|
411
|
+
|
|
412
|
+
# 沙箱执行
|
|
413
|
+
from .sandbox import execute_python_node
|
|
414
|
+
|
|
415
|
+
code_str = config.get("code", "")
|
|
416
|
+
py_result = execute_python_node(code_str, resolved_inputs)
|
|
417
|
+
|
|
418
|
+
# 映射 outputs 到 context.variables
|
|
419
|
+
outputs_spec: dict[str, Any] = config.get("outputs", {})
|
|
420
|
+
for key in outputs_spec:
|
|
421
|
+
if key in py_result:
|
|
422
|
+
context.set_variable(key, py_result[key])
|
|
423
|
+
|
|
424
|
+
context.set_node_result(current_node_id, _json.dumps(py_result, ensure_ascii=False))
|
|
425
|
+
yield event.step_finished(step_name=f"python:{current_node_id}")
|
|
426
|
+
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
427
|
+
|
|
428
|
+
else:
|
|
429
|
+
break
|
|
430
|
+
|
|
431
|
+
async def _execute_node(self, node_config: dict, context: "FlowContext") -> Any:
|
|
432
|
+
"""执行节点"""
|
|
433
|
+
node_type = node_config.get("type")
|
|
434
|
+
config = node_config.get("config", {})
|
|
435
|
+
|
|
436
|
+
# 设置参数到 context
|
|
437
|
+
self._set_parameters(config, context)
|
|
438
|
+
|
|
439
|
+
# 创建并执行 runnable
|
|
440
|
+
if node_type == "agent":
|
|
441
|
+
runnable = self._create_agent(config)
|
|
442
|
+
elif node_type == "tool":
|
|
443
|
+
runnable = self._create_tool(config)
|
|
444
|
+
else:
|
|
445
|
+
raise FlowError("UNKNOWN_NODE_TYPE", 400, {"type": node_type})
|
|
446
|
+
|
|
447
|
+
return await runnable.run(context)
|
|
448
|
+
|
|
449
|
+
def _set_parameters(self, config: dict, context: "FlowContext") -> None:
|
|
450
|
+
"""设置参数到 context"""
|
|
451
|
+
parameters = config.get("parameters", {})
|
|
452
|
+
|
|
453
|
+
for key, value in parameters.items():
|
|
454
|
+
resolved_value = context.resolve_reference(value) if isinstance(value, str) else value
|
|
455
|
+
context.set_variable(key, resolved_value)
|
|
456
|
+
|
|
457
|
+
def _create_agent(self, config: dict):
|
|
458
|
+
"""创建 Agent"""
|
|
459
|
+
agent_name = config.get("agent_name")
|
|
460
|
+
if not agent_name:
|
|
461
|
+
raise FlowError("MISSING_AGENT_NAME", 400)
|
|
462
|
+
|
|
463
|
+
agent = registry.create_agent(agent_name)
|
|
464
|
+
if not agent:
|
|
465
|
+
raise FlowError("AGENT_NOT_FOUND", 404, {"agent_name": agent_name})
|
|
466
|
+
|
|
467
|
+
return agent
|
|
468
|
+
|
|
469
|
+
def _create_tool(self, config: dict):
|
|
470
|
+
"""创建 Tool"""
|
|
471
|
+
tool_name = config.get("tool_name")
|
|
472
|
+
if not tool_name:
|
|
473
|
+
raise FlowError("MISSING_TOOL_NAME", 400)
|
|
474
|
+
|
|
475
|
+
tool = registry.create_tool(tool_name)
|
|
476
|
+
if not tool:
|
|
477
|
+
raise FlowError("TOOL_NOT_FOUND", 404, {"tool_name": tool_name})
|
|
478
|
+
|
|
479
|
+
return tool
|
|
480
|
+
|
|
481
|
+
def get_node_config(self, node_id: str) -> dict[str, Any] | None:
|
|
482
|
+
"""获取节点配置"""
|
|
483
|
+
for node in self.nodes:
|
|
484
|
+
if node.get("id") == node_id:
|
|
485
|
+
return node
|
|
486
|
+
return None
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# Copyright (c) 2020-2025 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""Python 沙箱执行(基于 RestrictedPython)"""
|
|
4
|
+
|
|
5
|
+
import builtins
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from RestrictedPython import compile_restricted, safe_globals
|
|
9
|
+
from RestrictedPython.Eval import default_guarded_getitem, default_guarded_getiter
|
|
10
|
+
from RestrictedPython.Guards import guarded_unpack_sequence, safer_getattr
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# 白名单内置模块
|
|
14
|
+
_ALLOWED_MODULES = frozenset({
|
|
15
|
+
"json", "re", "math", "datetime", "collections",
|
|
16
|
+
"itertools", "functools", "operator", "string",
|
|
17
|
+
})
|
|
18
|
+
|
|
19
|
+
_builtins_import = builtins.__import__
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _safe_import(name: str, *args: Any, **kwargs: Any) -> Any:
|
|
23
|
+
"""只允许导入白名单模块"""
|
|
24
|
+
if name not in _ALLOWED_MODULES:
|
|
25
|
+
raise ImportError(f"Import of '{name}' is not allowed in python node")
|
|
26
|
+
return _builtins_import(name, *args, **kwargs)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def execute_python_node(code: str, inputs: dict[str, Any]) -> dict[str, Any]:
|
|
30
|
+
"""在 RestrictedPython 沙箱中执行用户代码
|
|
31
|
+
|
|
32
|
+
Args:
|
|
33
|
+
code: 用户代码,必须定义 main(**kwargs) -> dict 函数
|
|
34
|
+
inputs: 传入 main 函数的参数
|
|
35
|
+
|
|
36
|
+
Returns:
|
|
37
|
+
main 函数的返回值(dict)
|
|
38
|
+
"""
|
|
39
|
+
byte_code = compile_restricted(code, "<flow_python_node>", "exec")
|
|
40
|
+
|
|
41
|
+
glb: dict[str, Any] = dict(safe_globals)
|
|
42
|
+
glb["_getitem_"] = default_guarded_getitem
|
|
43
|
+
glb["_getiter_"] = default_guarded_getiter
|
|
44
|
+
glb["_unpack_sequence_"] = guarded_unpack_sequence
|
|
45
|
+
glb["_getattr_"] = safer_getattr
|
|
46
|
+
glb["__builtins__"] = {**glb["__builtins__"], "__import__": _safe_import}
|
|
47
|
+
|
|
48
|
+
loc: dict[str, Any] = {}
|
|
49
|
+
exec(byte_code, glb, loc) # noqa: S102
|
|
50
|
+
|
|
51
|
+
main_fn = loc.get("main")
|
|
52
|
+
if not callable(main_fn):
|
|
53
|
+
raise ValueError("Python node code must define a callable 'main' function")
|
|
54
|
+
|
|
55
|
+
result = main_fn(**inputs)
|
|
56
|
+
if not isinstance(result, dict):
|
|
57
|
+
raise TypeError(f"main() must return a dict, got {type(result).__name__}")
|
|
58
|
+
|
|
59
|
+
return result
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: agstack
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.4.0
|
|
4
4
|
Summary: Production-ready toolkit for building FastAPI and LLM applications
|
|
5
5
|
Author-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
6
6
|
Maintainer-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
@@ -33,6 +33,7 @@ Requires-Dist: pycasbin>=2.8.0
|
|
|
33
33
|
Requires-Dist: pydantic>=2.12.4
|
|
34
34
|
Requires-Dist: python-multipart>=0.0.20
|
|
35
35
|
Requires-Dist: requests>=2.32.5
|
|
36
|
+
Requires-Dist: RestrictedPython>=7.0
|
|
36
37
|
Requires-Dist: sqlalchemy[asyncio]>=2.0.48
|
|
37
38
|
Requires-Dist: sqlobjects>=1.3.0
|
|
38
39
|
Requires-Dist: tiktoken>=0.12.0
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "agstack"
|
|
3
|
-
version = "1.
|
|
3
|
+
version = "1.4.0"
|
|
4
4
|
description = "Production-ready toolkit for building FastAPI and LLM applications"
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
license = "MIT"
|
|
@@ -52,6 +52,7 @@ dependencies = [
|
|
|
52
52
|
"pydantic>=2.12.4",
|
|
53
53
|
"python-multipart>=0.0.20",
|
|
54
54
|
"requests>=2.32.5",
|
|
55
|
+
"RestrictedPython>=7.0",
|
|
55
56
|
"sqlalchemy[asyncio]>=2.0.48",
|
|
56
57
|
"sqlobjects>=1.3.0",
|
|
57
58
|
"tiktoken>=0.12.0",
|
|
@@ -1,272 +0,0 @@
|
|
|
1
|
-
# Copyright (c) 2020-2025 XtraVisions, All rights reserved.
|
|
2
|
-
|
|
3
|
-
"""Flow 定义和执行"""
|
|
4
|
-
|
|
5
|
-
import json as _json
|
|
6
|
-
from dataclasses import dataclass, field
|
|
7
|
-
from typing import TYPE_CHECKING, Any, AsyncIterator
|
|
8
|
-
from uuid import uuid4
|
|
9
|
-
|
|
10
|
-
from . import event
|
|
11
|
-
from .exceptions import FlowError
|
|
12
|
-
from .registry import registry
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
if TYPE_CHECKING:
|
|
16
|
-
from .context import FlowContext
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
class _SafeFormatDict(dict):
|
|
20
|
-
"""安全的模板变量替换,缺失 key 时保留原始占位符"""
|
|
21
|
-
|
|
22
|
-
def __missing__(self, key: str) -> str:
|
|
23
|
-
return f"{{{key}}}"
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
@dataclass
|
|
27
|
-
class Flow:
|
|
28
|
-
"""Flow 配置定义"""
|
|
29
|
-
|
|
30
|
-
flow_id: str
|
|
31
|
-
name: str
|
|
32
|
-
description: str = ""
|
|
33
|
-
nodes: list[dict[str, Any]] = field(default_factory=list)
|
|
34
|
-
edges: list[dict[str, Any]] = field(default_factory=list)
|
|
35
|
-
variables: dict[str, Any] = field(default_factory=dict)
|
|
36
|
-
|
|
37
|
-
# ── 边驱动路由 ──
|
|
38
|
-
|
|
39
|
-
def _resolve_next_node(self, current_id: str, result: str | None = None) -> str | None:
|
|
40
|
-
"""根据当前节点和执行结果,通过 edges 查找下一节点"""
|
|
41
|
-
for edge in self.edges:
|
|
42
|
-
if edge.get("source") == current_id:
|
|
43
|
-
cond = edge.get("condition")
|
|
44
|
-
if cond is None or cond == result:
|
|
45
|
-
return edge.get("target")
|
|
46
|
-
return None
|
|
47
|
-
|
|
48
|
-
# ── condition 节点 ──
|
|
49
|
-
|
|
50
|
-
async def _evaluate_condition(self, node: dict, context: "FlowContext") -> str:
|
|
51
|
-
"""调用 LLM 判断条件是否匹配"""
|
|
52
|
-
config = node.get("config", {})
|
|
53
|
-
topic = config.get("topic", "")
|
|
54
|
-
query = context.get_variable("query", "")
|
|
55
|
-
|
|
56
|
-
prompt = (
|
|
57
|
-
f"判断以下问题是否属于「{topic}」相关问题。\n"
|
|
58
|
-
f"问题:{query}\n"
|
|
59
|
-
f'仅回复 JSON:{{"result": "match"}} 或 {{"result": "reject"}}'
|
|
60
|
-
)
|
|
61
|
-
|
|
62
|
-
from ..client import get_llm_client
|
|
63
|
-
|
|
64
|
-
client = get_llm_client()
|
|
65
|
-
response = await client.chat(
|
|
66
|
-
messages=[{"role": "user", "content": prompt}],
|
|
67
|
-
model=config.get("model", "gpt-4o-mini"),
|
|
68
|
-
temperature=0,
|
|
69
|
-
)
|
|
70
|
-
text = response.choices[0].message.content or ""
|
|
71
|
-
try:
|
|
72
|
-
return _json.loads(text).get("result", "reject")
|
|
73
|
-
except Exception:
|
|
74
|
-
return "match" if "match" in text.lower() else "reject"
|
|
75
|
-
|
|
76
|
-
# ── message 节点 ──
|
|
77
|
-
|
|
78
|
-
async def _emit_message(self, node: dict, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
    """Render the node's content template and emit it as one assistant message."""
    template = node.get("config", {}).get("content", "")
    # Unknown placeholders survive untouched thanks to _SafeFormatDict.
    rendered = template.format_map(_SafeFormatDict(context.variables))
    message_id = context.message_id or str(uuid4())
    yield event.text_message_start(message_id=message_id, role="assistant")
    yield event.text_message_content(message_id=message_id, delta=rendered)
    yield event.text_message_end(message_id=message_id)
|
|
87
|
-
|
|
88
|
-
# ── 执行入口 ──
|
|
89
|
-
|
|
90
|
-
async def run(self, context: "FlowContext") -> dict[str, Any]:
    """Execute the flow non-streamingly and return ``context.node_results``.

    Two execution modes:
    - no edges: legacy mode, nodes run sequentially in list order;
    - with edges: routing starts at ``nodes[0]`` and follows
      ``_resolve_next_node`` after each node.
    """
    if not self.edges:
        # Backward compatibility: without edges, execute nodes in list order.
        for node in self.nodes:
            node_id = node.get("id")
            if not node_id:
                continue  # skip malformed nodes lacking an id
            context.current_node = node_id
            result = await self._execute_node(node, context)
            context.set_node_result(node_id, result)
    else:
        # Edge-driven execution, starting from the first node in the list.
        # NOTE(review): no visited-set — a cyclic edge graph would loop forever.
        current_node_id: str | None = self.nodes[0]["id"] if self.nodes else None
        while current_node_id:
            node = self.get_node_config(current_node_id)
            if not node:
                break  # dangling edge target: stop the flow
            context.current_node = current_node_id
            node_type = node.get("type")

            if node_type == "condition":
                # Condition result ("match"/"reject") selects the outgoing edge.
                result = await self._evaluate_condition(node, context)
                context.set_node_result(current_node_id, result)
                current_node_id = self._resolve_next_node(current_node_id, result)
            elif node_type == "message":
                # Inline template rendering (non-streaming twin of _emit_message).
                config = node.get("config", {})
                template = config.get("content", "")
                text = template.format_map(_SafeFormatDict(context.variables))
                context.set_node_result(current_node_id, text)
                # "done" is the implicit success result for non-condition nodes.
                current_node_id = self._resolve_next_node(current_node_id, "done")
            elif node_type in ("agent", "tool"):
                result = await self._execute_node(node, context)
                context.set_node_result(current_node_id, result)
                current_node_id = self._resolve_next_node(current_node_id, "done")
            else:
                break  # unknown node type: stop rather than raise

    return context.node_results
|
|
129
|
-
|
|
130
|
-
async def stream(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
    """Stream flow execution as AG-UI standard events."""
    yield event.step_started(step_name=f"flow:{self.name}")

    # Legacy configs carry no edges and run nodes strictly in list order;
    # otherwise routing follows the edge graph.
    runner = self._stream_edge_driven if self.edges else self._stream_sequential
    async for evt in runner(context):
        yield evt

    yield event.step_finished(step_name=f"flow:{self.name}")
|
|
144
|
-
|
|
145
|
-
async def _stream_sequential(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
    """Stream nodes in list order (legacy behavior for flows without edges).

    Emits a balanced ``step_started``/``step_finished`` pair per node, with an
    inner pair around the agent or tool being executed.
    """
    for node in self.nodes:
        node_id = node.get("id")
        if not node_id:
            continue  # skip malformed nodes lacking an id

        context.current_node = node_id
        yield event.step_started(step_name=f"node:{node_id}")

        if node.get("type") == "agent":
            agent_name = node.get("config", {}).get("agent_name", "")
            yield event.step_started(step_name=f"agent:{agent_name}")
            self._set_parameters(node.get("config", {}), context)
            ag = self._create_agent(node.get("config", {}))
            async for evt in ag.stream(context):
                yield evt
            result = context.get_last_output(ag.name) or ""
            context.set_node_result(node_id, result)
            yield event.step_finished(step_name=f"agent:{agent_name}")
        else:
            # NOTE(review): every non-agent node is treated as a tool here;
            # _execute_node raises FlowError for any other node type.
            tool_name = node.get("config", {}).get("tool_name", "")
            yield event.step_started(step_name=f"tool:{tool_name}")
            result = await self._execute_node(node, context)
            context.set_node_result(node_id, result)
            yield event.step_finished(step_name=f"tool:{tool_name}")

        # BUG FIX: the node-level step was started above but never finished,
        # leaving unbalanced STEP_STARTED/STEP_FINISHED events in the stream.
        yield event.step_finished(step_name=f"node:{node_id}")
|
|
171
|
-
|
|
172
|
-
async def _stream_edge_driven(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
    """Edge-driven streaming execution.

    Starts at ``nodes[0]`` and follows ``_resolve_next_node`` after each
    node; "done" is the implicit success result for non-condition nodes.
    """
    # NOTE(review): no visited-set — a cyclic edge graph would loop forever.
    current_node_id: str | None = self.nodes[0]["id"] if self.nodes else None

    while current_node_id:
        node = self.get_node_config(current_node_id)
        if not node:
            break  # dangling edge target: stop the flow

        context.current_node = current_node_id
        node_type = node.get("type")

        if node_type == "condition":
            # Condition result ("match"/"reject") selects the outgoing edge.
            result = await self._evaluate_condition(node, context)
            context.set_node_result(current_node_id, result)
            current_node_id = self._resolve_next_node(current_node_id, result)

        elif node_type == "message":
            # _emit_message yields start/content/end text-message events.
            async for evt in self._emit_message(node, context):
                yield evt
            current_node_id = self._resolve_next_node(current_node_id, "done")

        elif node_type == "agent":
            agent_name = node.get("config", {}).get("agent_name", "")
            yield event.step_started(step_name=f"agent:{agent_name}")
            self._set_parameters(node.get("config", {}), context)
            ag = self._create_agent(node.get("config", {}))
            async for evt in ag.stream(context):
                yield evt
            # The agent's last output becomes this node's result.
            result = context.get_last_output(ag.name) or ""
            context.set_node_result(current_node_id, result)
            yield event.step_finished(step_name=f"agent:{agent_name}")
            current_node_id = self._resolve_next_node(current_node_id, "done")

        elif node_type == "tool":
            tool_name = node.get("config", {}).get("tool_name", "")
            yield event.step_started(step_name=f"tool:{tool_name}")
            result = await self._execute_node(node, context)
            context.set_node_result(current_node_id, result)
            yield event.step_finished(step_name=f"tool:{tool_name}")
            current_node_id = self._resolve_next_node(current_node_id, "done")

        else:
            break  # unknown node type: stop rather than raise
|
|
216
|
-
|
|
217
|
-
async def _execute_node(self, node_config: dict, context: "FlowContext") -> Any:
|
|
218
|
-
"""执行节点"""
|
|
219
|
-
node_type = node_config.get("type")
|
|
220
|
-
config = node_config.get("config", {})
|
|
221
|
-
|
|
222
|
-
# 设置参数到 context
|
|
223
|
-
self._set_parameters(config, context)
|
|
224
|
-
|
|
225
|
-
# 创建并执行 runnable
|
|
226
|
-
if node_type == "agent":
|
|
227
|
-
runnable = self._create_agent(config)
|
|
228
|
-
elif node_type == "tool":
|
|
229
|
-
runnable = self._create_tool(config)
|
|
230
|
-
else:
|
|
231
|
-
raise FlowError("UNKNOWN_NODE_TYPE", 400, {"type": node_type})
|
|
232
|
-
|
|
233
|
-
return await runnable.run(context)
|
|
234
|
-
|
|
235
|
-
def _set_parameters(self, config: dict, context: "FlowContext") -> None:
|
|
236
|
-
"""设置参数到 context"""
|
|
237
|
-
parameters = config.get("parameters", {})
|
|
238
|
-
|
|
239
|
-
for key, value in parameters.items():
|
|
240
|
-
resolved_value = context.resolve_reference(value) if isinstance(value, str) else value
|
|
241
|
-
context.set_variable(key, resolved_value)
|
|
242
|
-
|
|
243
|
-
def _create_agent(self, config: dict):
    """Instantiate the agent named in ``config`` via the registry.

    Raises FlowError("MISSING_AGENT_NAME") when the name is absent and
    FlowError("AGENT_NOT_FOUND") when the registry cannot create it.
    """
    agent_name = config.get("agent_name")
    if not agent_name:
        raise FlowError("MISSING_AGENT_NAME", 400)

    agent = registry.create_agent(agent_name)
    if agent:
        return agent
    raise FlowError("AGENT_NOT_FOUND", 404, {"agent_name": agent_name})
|
|
254
|
-
|
|
255
|
-
def _create_tool(self, config: dict):
    """Instantiate the tool named in ``config`` via the registry.

    Raises FlowError("MISSING_TOOL_NAME") when the name is absent and
    FlowError("TOOL_NOT_FOUND") when the registry cannot create it.
    """
    tool_name = config.get("tool_name")
    if not tool_name:
        raise FlowError("MISSING_TOOL_NAME", 400)

    tool = registry.create_tool(tool_name)
    if tool:
        return tool
    raise FlowError("TOOL_NOT_FOUND", 404, {"tool_name": tool_name})
|
|
266
|
-
|
|
267
|
-
def get_node_config(self, node_id: str) -> dict[str, Any] | None:
|
|
268
|
-
"""获取节点配置"""
|
|
269
|
-
for node in self.nodes:
|
|
270
|
-
if node.get("id") == node_id:
|
|
271
|
-
return node
|
|
272
|
-
return None
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|