agstack 1.5.0__tar.gz → 1.6.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agstack-1.5.0 → agstack-1.6.0}/PKG-INFO +1 -1
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/__init__.py +4 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/flow.py +97 -173
- agstack-1.6.0/agstack/llm/flow/nodes/__init__.py +39 -0
- agstack-1.6.0/agstack/llm/flow/nodes/agent_node.py +55 -0
- agstack-1.6.0/agstack/llm/flow/nodes/base.py +59 -0
- agstack-1.6.0/agstack/llm/flow/nodes/detect_node.py +93 -0
- agstack-1.6.0/agstack/llm/flow/nodes/llm_chat_node.py +152 -0
- agstack-1.6.0/agstack/llm/flow/nodes/llm_embed_node.py +39 -0
- agstack-1.6.0/agstack/llm/flow/nodes/llm_rerank_node.py +49 -0
- agstack-1.5.0/agstack/llm/flow/sandbox.py → agstack-1.6.0/agstack/llm/flow/nodes/python_node.py +31 -4
- agstack-1.6.0/agstack/llm/flow/nodes/tool_node.py +40 -0
- agstack-1.6.0/agstack/llm/flow/sandbox.py +8 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack.egg-info/PKG-INFO +1 -1
- {agstack-1.5.0 → agstack-1.6.0}/agstack.egg-info/SOURCES.txt +9 -0
- {agstack-1.5.0 → agstack-1.6.0}/pyproject.toml +1 -1
- {agstack-1.5.0 → agstack-1.6.0}/LICENSE +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/README.md +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/config/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/config/logger.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/config/manager.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/config/types.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/contexts.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/decorators.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/events.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/exceptions.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/fastapi/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/fastapi/exception.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/fastapi/middleware.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/fastapi/offline.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/fastapi/sse.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/infra/db/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/infra/es/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/infra/kg/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/infra/mq/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/client.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/agent.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/context.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/event.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/exceptions.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/factory.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/loader.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/records.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/registry.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/state.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/flow/tool.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/prompts.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/llm/token.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/registry.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/schema.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/security/__init__.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/security/casbin.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/security/crypt.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack/status.py +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack.egg-info/dependency_links.txt +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack.egg-info/requires.txt +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/agstack.egg-info/top_level.txt +0 -0
- {agstack-1.5.0 → agstack-1.6.0}/setup.cfg +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: agstack
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.6.0
|
|
4
4
|
Summary: Production-ready toolkit for building FastAPI and LLM applications
|
|
5
5
|
Author-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
6
6
|
Maintainer-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
@@ -18,6 +18,7 @@ from .exceptions import (
|
|
|
18
18
|
from .factory import create_agent, create_tool
|
|
19
19
|
from .flow import Flow
|
|
20
20
|
from .loader import FlowLoader
|
|
21
|
+
from .nodes import NodeHandler, register_node_handler
|
|
21
22
|
from .records import Record, Status
|
|
22
23
|
from .registry import registry
|
|
23
24
|
from .state import FlowState
|
|
@@ -32,6 +33,9 @@ __all__ = [
|
|
|
32
33
|
"Flow",
|
|
33
34
|
"FlowContext",
|
|
34
35
|
"Usage",
|
|
36
|
+
# 节点处理器
|
|
37
|
+
"NodeHandler",
|
|
38
|
+
"register_node_handler",
|
|
35
39
|
# AG-UI 协议
|
|
36
40
|
"EventType",
|
|
37
41
|
"event",
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
# Copyright (c) 2020-
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
2
|
|
|
3
3
|
"""Flow 定义和执行"""
|
|
4
4
|
|
|
@@ -9,12 +9,12 @@ from typing import TYPE_CHECKING, Any, AsyncIterator
|
|
|
9
9
|
from uuid import uuid4
|
|
10
10
|
|
|
11
11
|
from . import event
|
|
12
|
-
from .exceptions import
|
|
13
|
-
from .registry import registry
|
|
12
|
+
from .exceptions import NodeExecutionError
|
|
14
13
|
|
|
15
14
|
|
|
16
15
|
if TYPE_CHECKING:
|
|
17
16
|
from .context import FlowContext
|
|
17
|
+
from .nodes.base import NodeHandler
|
|
18
18
|
|
|
19
19
|
|
|
20
20
|
@dataclass
|
|
@@ -44,6 +44,16 @@ class Flow:
|
|
|
44
44
|
edges: list[dict[str, Any]] = field(default_factory=list)
|
|
45
45
|
variables: dict[str, Any] = field(default_factory=dict)
|
|
46
46
|
|
|
47
|
+
_node_handlers: dict[str, "NodeHandler"] = field(default_factory=dict, init=False, repr=False)
|
|
48
|
+
|
|
49
|
+
def __post_init__(self) -> None:
|
|
50
|
+
from .nodes import _global_node_handlers, builtin_handlers
|
|
51
|
+
|
|
52
|
+
for handler in builtin_handlers:
|
|
53
|
+
self._node_handlers[handler.node_type] = handler
|
|
54
|
+
# 全局注册的自定义 handler 可覆盖内置
|
|
55
|
+
self._node_handlers.update(_global_node_handlers)
|
|
56
|
+
|
|
47
57
|
# ── 重试策略 ──
|
|
48
58
|
|
|
49
59
|
@staticmethod
|
|
@@ -99,7 +109,7 @@ class Flow:
|
|
|
99
109
|
yield event.text_message_content(message_id=msg_id, delta=text)
|
|
100
110
|
yield event.text_message_end(message_id=msg_id)
|
|
101
111
|
|
|
102
|
-
# ──
|
|
112
|
+
# ── 带重试的节点执行(统一走 NodeHandler) ──
|
|
103
113
|
|
|
104
114
|
async def _execute_node_with_retry(
|
|
105
115
|
self,
|
|
@@ -108,10 +118,17 @@ class Flow:
|
|
|
108
118
|
node_id: str,
|
|
109
119
|
) -> AsyncIterator[dict[str, Any]]:
|
|
110
120
|
"""执行节点,带重试策略,产出 AG-UI 事件"""
|
|
121
|
+
node_type: str = node.get("type", "")
|
|
122
|
+
handler = self._node_handlers.get(node_type)
|
|
123
|
+
if not handler:
|
|
124
|
+
yield event.run_error(
|
|
125
|
+
message=f"Unknown node type: {node_type}",
|
|
126
|
+
code="UNKNOWN_NODE_TYPE",
|
|
127
|
+
)
|
|
128
|
+
raise NodeExecutionError("UNKNOWN_NODE_TYPE", args={"node_type": node_type})
|
|
129
|
+
|
|
111
130
|
policy = self._get_retry_policy(node)
|
|
112
|
-
|
|
113
|
-
config = node.get("config", {})
|
|
114
|
-
label = config.get("agent_name") or config.get("tool_name") or node_id
|
|
131
|
+
label = handler.get_step_name(node, node_id)
|
|
115
132
|
last_error: Exception | None = None
|
|
116
133
|
|
|
117
134
|
for attempt in range(policy.max_retries + 1):
|
|
@@ -131,23 +148,9 @@ class Flow:
|
|
|
131
148
|
},
|
|
132
149
|
)
|
|
133
150
|
|
|
134
|
-
|
|
135
|
-
yield
|
|
136
|
-
|
|
137
|
-
ag = self._create_agent(config)
|
|
138
|
-
async for evt in ag.stream(context):
|
|
139
|
-
yield evt
|
|
140
|
-
result = context.get_last_output(ag.name) or ""
|
|
141
|
-
context.set_node_result(node_id, result)
|
|
142
|
-
yield event.step_finished(step_name=f"agent:{label}")
|
|
143
|
-
return
|
|
144
|
-
|
|
145
|
-
elif node_type == "tool":
|
|
146
|
-
yield event.step_started(step_name=f"tool:{label}")
|
|
147
|
-
result = await self._execute_node(node, context)
|
|
148
|
-
context.set_node_result(node_id, result)
|
|
149
|
-
yield event.step_finished(step_name=f"tool:{label}")
|
|
150
|
-
return
|
|
151
|
+
async for evt in handler.stream(node, context, node_id):
|
|
152
|
+
yield evt
|
|
153
|
+
return
|
|
151
154
|
|
|
152
155
|
except Exception as e:
|
|
153
156
|
last_error = e
|
|
@@ -173,8 +176,13 @@ class Flow:
|
|
|
173
176
|
if not node_id:
|
|
174
177
|
continue
|
|
175
178
|
context.current_node = node_id
|
|
176
|
-
|
|
177
|
-
|
|
179
|
+
node_type: str = node.get("type", "")
|
|
180
|
+
handler = self._node_handlers.get(node_type)
|
|
181
|
+
if handler:
|
|
182
|
+
result = await handler.execute(node, context)
|
|
183
|
+
context.set_node_result(node_id, result)
|
|
184
|
+
else:
|
|
185
|
+
raise NodeExecutionError("UNKNOWN_NODE_TYPE", args={"node_type": node_type})
|
|
178
186
|
else:
|
|
179
187
|
# edge 驱动执行
|
|
180
188
|
current_node_id: str | None = self.nodes[0]["id"] if self.nodes else None
|
|
@@ -183,7 +191,7 @@ class Flow:
|
|
|
183
191
|
if not node:
|
|
184
192
|
break
|
|
185
193
|
context.current_node = current_node_id
|
|
186
|
-
node_type = node.get("type")
|
|
194
|
+
node_type: str = node.get("type", "")
|
|
187
195
|
|
|
188
196
|
if node_type == "message":
|
|
189
197
|
config = node.get("config", {})
|
|
@@ -191,13 +199,6 @@ class Flow:
|
|
|
191
199
|
text = template.format_map(_SafeFormatDict(context.variables))
|
|
192
200
|
context.set_node_result(current_node_id, text)
|
|
193
201
|
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
194
|
-
elif node_type in ("agent", "tool"):
|
|
195
|
-
result = await self._execute_node(node, context)
|
|
196
|
-
context.set_node_result(current_node_id, result)
|
|
197
|
-
route_key = self._extract_route_key(result)
|
|
198
|
-
current_node_id = self._resolve_next_node(current_node_id, route_key) or self._resolve_next_node(
|
|
199
|
-
current_node_id, "done"
|
|
200
|
-
)
|
|
201
202
|
|
|
202
203
|
elif node_type == "parallel":
|
|
203
204
|
config = node.get("config", {})
|
|
@@ -208,9 +209,11 @@ class Flow:
|
|
|
208
209
|
if not branch_node:
|
|
209
210
|
return
|
|
210
211
|
context.current_node = branch_id
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
212
|
+
branch_type: str = branch_node.get("type", "")
|
|
213
|
+
branch_handler = self._node_handlers.get(branch_type)
|
|
214
|
+
if branch_handler:
|
|
215
|
+
result = await branch_handler.execute(branch_node, context)
|
|
216
|
+
context.set_node_result(branch_id, result)
|
|
214
217
|
|
|
215
218
|
await asyncio.gather(*[_run_branch(bid) for bid in branches])
|
|
216
219
|
context.set_node_result(current_node_id, "done")
|
|
@@ -238,9 +241,11 @@ class Flow:
|
|
|
238
241
|
body_node = self.get_node_config(body_node_id)
|
|
239
242
|
if not body_node:
|
|
240
243
|
continue
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
+
body_type: str = body_node.get("type", "")
|
|
245
|
+
body_handler = self._node_handlers.get(body_type)
|
|
246
|
+
if body_handler:
|
|
247
|
+
body_result = await body_handler.execute(body_node, context)
|
|
248
|
+
context.set_node_result(body_node_id, body_result)
|
|
244
249
|
if body_nodes:
|
|
245
250
|
results.append(context.node_results.get(body_nodes[-1]))
|
|
246
251
|
|
|
@@ -262,9 +267,11 @@ class Flow:
|
|
|
262
267
|
body_node = self.get_node_config(body_node_id)
|
|
263
268
|
if not body_node:
|
|
264
269
|
continue
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
270
|
+
body_type: str = body_node.get("type", "")
|
|
271
|
+
body_handler = self._node_handlers.get(body_type)
|
|
272
|
+
if body_handler:
|
|
273
|
+
body_result = await body_handler.execute(body_node, context)
|
|
274
|
+
context.set_node_result(body_node_id, body_result)
|
|
268
275
|
if condition_node_id:
|
|
269
276
|
cond_result = context.node_results.get(condition_node_id, "")
|
|
270
277
|
if isinstance(cond_result, str):
|
|
@@ -279,27 +286,18 @@ class Flow:
|
|
|
279
286
|
context.set_node_result(current_node_id, "done")
|
|
280
287
|
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
281
288
|
|
|
282
|
-
elif node_type
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
code_str = config.get("code", "")
|
|
292
|
-
py_result = execute_python_node(code_str, resolved_inputs)
|
|
293
|
-
|
|
294
|
-
outputs_spec: dict[str, Any] = config.get("outputs", {})
|
|
295
|
-
for key in outputs_spec:
|
|
296
|
-
if key in py_result:
|
|
297
|
-
context.set_variable(key, py_result[key])
|
|
289
|
+
elif node_type in self._node_handlers:
|
|
290
|
+
# 所有执行类节点统一分发
|
|
291
|
+
handler = self._node_handlers[node_type]
|
|
292
|
+
result = await handler.execute(node, context)
|
|
293
|
+
context.set_node_result(current_node_id, result)
|
|
294
|
+
route_key = self._extract_route_key(result)
|
|
295
|
+
current_node_id = self._resolve_next_node(current_node_id, route_key) or self._resolve_next_node(
|
|
296
|
+
current_node_id, "done"
|
|
297
|
+
)
|
|
298
298
|
|
|
299
|
-
context.set_node_result(current_node_id, _json.dumps(py_result, ensure_ascii=False))
|
|
300
|
-
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
301
299
|
else:
|
|
302
|
-
|
|
300
|
+
raise NodeExecutionError("UNKNOWN_NODE_TYPE", args={"node_type": node_type})
|
|
303
301
|
|
|
304
302
|
return context.node_results
|
|
305
303
|
|
|
@@ -326,17 +324,17 @@ class Flow:
|
|
|
326
324
|
continue
|
|
327
325
|
|
|
328
326
|
context.current_node = node_id
|
|
329
|
-
|
|
327
|
+
node_type: str = node.get("type", "")
|
|
330
328
|
|
|
331
|
-
if
|
|
329
|
+
if node_type in self._node_handlers:
|
|
332
330
|
async for evt in self._execute_node_with_retry(node, context, node_id):
|
|
333
331
|
yield evt
|
|
334
332
|
else:
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
333
|
+
yield event.run_error(
|
|
334
|
+
message=f"Unknown node type: {node_type}",
|
|
335
|
+
code="UNKNOWN_NODE_TYPE",
|
|
336
|
+
)
|
|
337
|
+
raise NodeExecutionError("UNKNOWN_NODE_TYPE", args={"node_type": node_type})
|
|
340
338
|
|
|
341
339
|
async def _stream_edge_driven(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
|
|
342
340
|
"""边驱动流式执行"""
|
|
@@ -352,31 +350,13 @@ class Flow:
|
|
|
352
350
|
raise NodeExecutionError("NODE_NOT_FOUND", args={"node_id": current_node_id})
|
|
353
351
|
|
|
354
352
|
context.current_node = current_node_id
|
|
355
|
-
node_type = node.get("type")
|
|
353
|
+
node_type: str = node.get("type", "")
|
|
356
354
|
|
|
357
355
|
if node_type == "message":
|
|
358
356
|
async for evt in self._emit_message(node, context):
|
|
359
357
|
yield evt
|
|
360
358
|
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
361
359
|
|
|
362
|
-
elif node_type == "agent":
|
|
363
|
-
async for evt in self._execute_node_with_retry(node, context, current_node_id):
|
|
364
|
-
yield evt
|
|
365
|
-
result = context.node_results.get(current_node_id, "")
|
|
366
|
-
route_key = self._extract_route_key(result)
|
|
367
|
-
current_node_id = self._resolve_next_node(current_node_id, route_key) or self._resolve_next_node(
|
|
368
|
-
current_node_id, "done"
|
|
369
|
-
)
|
|
370
|
-
|
|
371
|
-
elif node_type == "tool":
|
|
372
|
-
async for evt in self._execute_node_with_retry(node, context, current_node_id):
|
|
373
|
-
yield evt
|
|
374
|
-
result = context.node_results.get(current_node_id, "")
|
|
375
|
-
route_key = self._extract_route_key(result)
|
|
376
|
-
current_node_id = self._resolve_next_node(current_node_id, route_key) or self._resolve_next_node(
|
|
377
|
-
current_node_id, "done"
|
|
378
|
-
)
|
|
379
|
-
|
|
380
360
|
elif node_type == "parallel":
|
|
381
361
|
config = node.get("config", {})
|
|
382
362
|
branches = config.get("branches", [])
|
|
@@ -387,9 +367,11 @@ class Flow:
|
|
|
387
367
|
if not branch_node:
|
|
388
368
|
return
|
|
389
369
|
context.current_node = branch_id
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
370
|
+
branch_type = branch_node.get("type", "")
|
|
371
|
+
branch_handler = self._node_handlers.get(branch_type)
|
|
372
|
+
if branch_handler:
|
|
373
|
+
result = await branch_handler.execute(branch_node, context)
|
|
374
|
+
context.set_node_result(branch_id, result)
|
|
393
375
|
|
|
394
376
|
await asyncio.gather(*[_exec_branch(bid) for bid in branches])
|
|
395
377
|
context.set_node_result(current_node_id, "done")
|
|
@@ -419,9 +401,11 @@ class Flow:
|
|
|
419
401
|
body_node = self.get_node_config(body_node_id)
|
|
420
402
|
if not body_node:
|
|
421
403
|
continue
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
404
|
+
body_type = body_node.get("type", "")
|
|
405
|
+
body_handler = self._node_handlers.get(body_type)
|
|
406
|
+
if body_handler:
|
|
407
|
+
body_result = await body_handler.execute(body_node, context)
|
|
408
|
+
context.set_node_result(body_node_id, body_result)
|
|
425
409
|
if body_nodes:
|
|
426
410
|
results.append(context.node_results.get(body_nodes[-1]))
|
|
427
411
|
|
|
@@ -445,9 +429,11 @@ class Flow:
|
|
|
445
429
|
body_node = self.get_node_config(body_node_id)
|
|
446
430
|
if not body_node:
|
|
447
431
|
continue
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
432
|
+
body_type = body_node.get("type", "")
|
|
433
|
+
body_handler = self._node_handlers.get(body_type)
|
|
434
|
+
if body_handler:
|
|
435
|
+
body_result = await body_handler.execute(body_node, context)
|
|
436
|
+
context.set_node_result(body_node_id, body_result)
|
|
451
437
|
# 检查终止条件
|
|
452
438
|
if condition_node_id:
|
|
453
439
|
cond_result = context.node_results.get(condition_node_id, "")
|
|
@@ -464,84 +450,22 @@ class Flow:
|
|
|
464
450
|
yield event.step_finished(step_name=f"loop:{current_node_id}")
|
|
465
451
|
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
466
452
|
|
|
467
|
-
elif node_type
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
# 沙箱执行
|
|
478
|
-
from .sandbox import execute_python_node
|
|
479
|
-
|
|
480
|
-
code_str = config.get("code", "")
|
|
481
|
-
py_result = execute_python_node(code_str, resolved_inputs)
|
|
482
|
-
|
|
483
|
-
# 映射 outputs 到 context.variables
|
|
484
|
-
outputs_spec: dict[str, Any] = config.get("outputs", {})
|
|
485
|
-
for key in outputs_spec:
|
|
486
|
-
if key in py_result:
|
|
487
|
-
context.set_variable(key, py_result[key])
|
|
488
|
-
|
|
489
|
-
context.set_node_result(current_node_id, _json.dumps(py_result, ensure_ascii=False))
|
|
490
|
-
yield event.step_finished(step_name=f"python:{current_node_id}")
|
|
491
|
-
current_node_id = self._resolve_next_node(current_node_id, "done")
|
|
453
|
+
elif node_type in self._node_handlers:
|
|
454
|
+
# 所有执行类节点统一分发
|
|
455
|
+
async for evt in self._execute_node_with_retry(node, context, current_node_id):
|
|
456
|
+
yield evt
|
|
457
|
+
result = context.node_results.get(current_node_id, "")
|
|
458
|
+
route_key = self._extract_route_key(result)
|
|
459
|
+
current_node_id = self._resolve_next_node(current_node_id, route_key) or self._resolve_next_node(
|
|
460
|
+
current_node_id, "done"
|
|
461
|
+
)
|
|
492
462
|
|
|
493
463
|
else:
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
config = node_config.get("config", {})
|
|
500
|
-
|
|
501
|
-
# 设置参数到 context
|
|
502
|
-
self._set_parameters(config, context)
|
|
503
|
-
|
|
504
|
-
# 创建并执行 runnable
|
|
505
|
-
if node_type == "agent":
|
|
506
|
-
runnable = self._create_agent(config)
|
|
507
|
-
elif node_type == "tool":
|
|
508
|
-
runnable = self._create_tool(config)
|
|
509
|
-
else:
|
|
510
|
-
raise FlowError("UNKNOWN_NODE_TYPE", 400, {"type": node_type})
|
|
511
|
-
|
|
512
|
-
return await runnable.run(context)
|
|
513
|
-
|
|
514
|
-
def _set_parameters(self, config: dict, context: "FlowContext") -> None:
|
|
515
|
-
"""设置参数到 context"""
|
|
516
|
-
parameters = config.get("parameters", {})
|
|
517
|
-
|
|
518
|
-
for key, value in parameters.items():
|
|
519
|
-
resolved_value = context.resolve_reference(value) if isinstance(value, str) else value
|
|
520
|
-
context.set_variable(key, resolved_value)
|
|
521
|
-
|
|
522
|
-
def _create_agent(self, config: dict):
|
|
523
|
-
"""创建 Agent"""
|
|
524
|
-
agent_name = config.get("agent_name")
|
|
525
|
-
if not agent_name:
|
|
526
|
-
raise FlowError("MISSING_AGENT_NAME", 400)
|
|
527
|
-
|
|
528
|
-
agent = registry.create_agent(agent_name)
|
|
529
|
-
if not agent:
|
|
530
|
-
raise FlowError("AGENT_NOT_FOUND", 404, {"agent_name": agent_name})
|
|
531
|
-
|
|
532
|
-
return agent
|
|
533
|
-
|
|
534
|
-
def _create_tool(self, config: dict):
|
|
535
|
-
"""创建 Tool"""
|
|
536
|
-
tool_name = config.get("tool_name")
|
|
537
|
-
if not tool_name:
|
|
538
|
-
raise FlowError("MISSING_TOOL_NAME", 400)
|
|
539
|
-
|
|
540
|
-
tool = registry.create_tool(tool_name)
|
|
541
|
-
if not tool:
|
|
542
|
-
raise FlowError("TOOL_NOT_FOUND", 404, {"tool_name": tool_name})
|
|
543
|
-
|
|
544
|
-
return tool
|
|
464
|
+
yield event.run_error(
|
|
465
|
+
message=f"Unknown node type: {node_type}",
|
|
466
|
+
code="UNKNOWN_NODE_TYPE",
|
|
467
|
+
)
|
|
468
|
+
raise NodeExecutionError("UNKNOWN_NODE_TYPE", args={"node_type": node_type})
|
|
545
469
|
|
|
546
470
|
def get_node_config(self, node_id: str) -> dict[str, Any] | None:
|
|
547
471
|
"""获取节点配置"""
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""内置节点处理器注册"""
|
|
4
|
+
|
|
5
|
+
from .agent_node import AgentNodeHandler
|
|
6
|
+
from .base import NodeHandler
|
|
7
|
+
from .detect_node import DetectNodeHandler
|
|
8
|
+
from .llm_chat_node import LLMChatNodeHandler
|
|
9
|
+
from .llm_embed_node import LLMEmbedNodeHandler
|
|
10
|
+
from .llm_rerank_node import LLMRerankNodeHandler
|
|
11
|
+
from .python_node import PythonNodeHandler
|
|
12
|
+
from .tool_node import ToolNodeHandler
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# 所有内置 handler 实例
|
|
16
|
+
builtin_handlers: list[NodeHandler] = [
|
|
17
|
+
AgentNodeHandler(),
|
|
18
|
+
ToolNodeHandler(),
|
|
19
|
+
PythonNodeHandler(),
|
|
20
|
+
LLMChatNodeHandler(),
|
|
21
|
+
LLMEmbedNodeHandler(),
|
|
22
|
+
LLMRerankNodeHandler(),
|
|
23
|
+
DetectNodeHandler(),
|
|
24
|
+
]
|
|
25
|
+
|
|
26
|
+
# 全局自定义节点注册
|
|
27
|
+
_global_node_handlers: dict[str, NodeHandler] = {}
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def register_node_handler(node_type: str, handler: NodeHandler) -> None:
|
|
31
|
+
"""注册自定义节点处理器(全局,所有 Flow 实例共享)"""
|
|
32
|
+
_global_node_handlers[node_type] = handler
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
__all__ = [
|
|
36
|
+
"NodeHandler",
|
|
37
|
+
"builtin_handlers",
|
|
38
|
+
"register_node_handler",
|
|
39
|
+
]
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""Agent 节点处理器 — 从 flow.py 提取"""
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any, AsyncIterator
|
|
6
|
+
|
|
7
|
+
from .. import event
|
|
8
|
+
from ..exceptions import FlowError
|
|
9
|
+
from ..registry import registry
|
|
10
|
+
from .base import NodeHandler
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from ..context import FlowContext
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class AgentNodeHandler(NodeHandler):
|
|
18
|
+
"""Agent 节点:通过 registry 查找 agent → ag.stream(context)"""
|
|
19
|
+
|
|
20
|
+
node_type = "agent"
|
|
21
|
+
|
|
22
|
+
def _set_parameters(self, config: dict, context: "FlowContext") -> None:
|
|
23
|
+
parameters = config.get("parameters", {})
|
|
24
|
+
for key, value in parameters.items():
|
|
25
|
+
resolved = context.resolve_reference(value) if isinstance(value, str) else value
|
|
26
|
+
context.set_variable(key, resolved)
|
|
27
|
+
|
|
28
|
+
def _create_agent(self, config: dict):
|
|
29
|
+
agent_name = config.get("agent_name")
|
|
30
|
+
if not agent_name:
|
|
31
|
+
raise FlowError("MISSING_AGENT_NAME", 400)
|
|
32
|
+
agent = registry.create_agent(agent_name)
|
|
33
|
+
if not agent:
|
|
34
|
+
raise FlowError("AGENT_NOT_FOUND", 404, {"agent_name": agent_name})
|
|
35
|
+
return agent
|
|
36
|
+
|
|
37
|
+
async def execute(self, node: dict, context: "FlowContext") -> Any:
|
|
38
|
+
config = node.get("config", {})
|
|
39
|
+
self._set_parameters(config, context)
|
|
40
|
+
ag = self._create_agent(config)
|
|
41
|
+
return await ag.run(context)
|
|
42
|
+
|
|
43
|
+
async def stream(self, node: dict, context: "FlowContext", node_id: str) -> AsyncIterator[dict[str, Any]]:
|
|
44
|
+
config = node.get("config", {})
|
|
45
|
+
step_name = self.get_step_name(node, node_id)
|
|
46
|
+
|
|
47
|
+
yield event.step_started(step_name=step_name)
|
|
48
|
+
self._set_parameters(config, context)
|
|
49
|
+
ag = self._create_agent(config)
|
|
50
|
+
async for evt in ag.stream(context):
|
|
51
|
+
yield evt
|
|
52
|
+
result = context.get_last_output(ag.name) or ""
|
|
53
|
+
context.set_node_result(node_id, result)
|
|
54
|
+
self.map_outputs(config, context, {"result": result})
|
|
55
|
+
yield event.step_finished(step_name=step_name)
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""NodeHandler 基类 — 所有执行类节点的公共接口"""
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any, AsyncIterator
|
|
6
|
+
|
|
7
|
+
from .. import event
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
from ..context import FlowContext
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class NodeHandler:
|
|
15
|
+
"""内置节点处理器基类
|
|
16
|
+
|
|
17
|
+
所有执行类节点(agent / tool / python / llm_chat / llm_embed / llm_rerank / detect)
|
|
18
|
+
都继承此基类,由 Flow 引擎统一分发。
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
node_type: str # 节点类型标识,子类必须设置
|
|
22
|
+
|
|
23
|
+
def get_step_name(self, node: dict, node_id: str) -> str:
|
|
24
|
+
"""step 事件标签,子类可覆盖"""
|
|
25
|
+
config = node.get("config", {})
|
|
26
|
+
label = config.get("agent_name") or config.get("tool_name") or node_id
|
|
27
|
+
return f"{self.node_type}:{label}"
|
|
28
|
+
|
|
29
|
+
def resolve_inputs(self, config: dict, context: "FlowContext") -> dict[str, Any]:
|
|
30
|
+
"""解析输入变量引用"""
|
|
31
|
+
inputs_spec = config.get("inputs", {})
|
|
32
|
+
return {k: context.resolve_reference(v) if isinstance(v, str) else v for k, v in inputs_spec.items()}
|
|
33
|
+
|
|
34
|
+
def map_outputs(self, config: dict, context: "FlowContext", result: dict) -> None:
|
|
35
|
+
"""将结果映射到 context.variables"""
|
|
36
|
+
for key in config.get("outputs", {}):
|
|
37
|
+
if isinstance(result, dict) and key in result:
|
|
38
|
+
context.set_variable(key, result[key])
|
|
39
|
+
|
|
40
|
+
async def execute(self, node: dict, context: "FlowContext") -> Any:
|
|
41
|
+
"""执行节点,返回结果(将存入 node_results)
|
|
42
|
+
|
|
43
|
+
子类必须实现此方法。
|
|
44
|
+
"""
|
|
45
|
+
raise NotImplementedError
|
|
46
|
+
|
|
47
|
+
async def stream(self, node: dict, context: "FlowContext", node_id: str) -> AsyncIterator[dict[str, Any]]:
|
|
48
|
+
"""流式执行,产出 AG-UI 事件
|
|
49
|
+
|
|
50
|
+
默认实现:产出 step_started,调 execute(),产出 step_finished。
|
|
51
|
+
需要流式输出的节点(如 agent, llm_chat)应覆盖此方法。
|
|
52
|
+
"""
|
|
53
|
+
step_name = self.get_step_name(node, node_id)
|
|
54
|
+
yield event.step_started(step_name=step_name)
|
|
55
|
+
result = await self.execute(node, context)
|
|
56
|
+
context.set_node_result(node_id, result)
|
|
57
|
+
config = node.get("config", {})
|
|
58
|
+
self.map_outputs(config, context, result)
|
|
59
|
+
yield event.step_finished(step_name=step_name)
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""Detect 节点 — 分类/检测,输出路由键"""
|
|
4
|
+
|
|
5
|
+
import json as _json
|
|
6
|
+
from typing import TYPE_CHECKING, Any
|
|
7
|
+
|
|
8
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
9
|
+
|
|
10
|
+
from ...client import get_llm_client
|
|
11
|
+
from ..context import Usage
|
|
12
|
+
from .base import NodeHandler
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from ..context import FlowContext
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class DetectNodeHandler(NodeHandler):
|
|
20
|
+
"""分类/检测节点
|
|
21
|
+
|
|
22
|
+
对输入文本进行分类,输出路由键。结果直接用于 _extract_route_key() 路由。
|
|
23
|
+
|
|
24
|
+
输入:query(待检测文本)+ instruction + options
|
|
25
|
+
输出:{"result": "<option>"} 的 JSON 字符串
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
node_type = "detect"
|
|
29
|
+
|
|
30
|
+
def _build_classification_prompt(
|
|
31
|
+
self, instruction: str, options: list[str], query: str
|
|
32
|
+
) -> list[ChatCompletionMessageParam]:
|
|
33
|
+
options_text = "\n".join(f"- {opt}" for opt in options)
|
|
34
|
+
system = (
|
|
35
|
+
"You are a precise classifier. "
|
|
36
|
+
"Given the user's input and instruction, classify it into exactly one of the provided options. "
|
|
37
|
+
'Respond with ONLY a JSON object: {"result": "<option>"}. '
|
|
38
|
+
"Do not include any other text."
|
|
39
|
+
)
|
|
40
|
+
user = f"Instruction: {instruction}\n\nOptions:\n{options_text}\n\nInput: {query}"
|
|
41
|
+
return [
|
|
42
|
+
{"role": "system", "content": system},
|
|
43
|
+
{"role": "user", "content": user},
|
|
44
|
+
]
|
|
45
|
+
|
|
46
|
+
async def execute(self, node: dict, context: "FlowContext") -> Any:
|
|
47
|
+
config = node.get("config", {})
|
|
48
|
+
resolved_inputs = self.resolve_inputs(config, context)
|
|
49
|
+
|
|
50
|
+
query = resolved_inputs.get("query", "")
|
|
51
|
+
instruction = config.get("instruction", "Classify the input")
|
|
52
|
+
options = config.get("options", [])
|
|
53
|
+
model = config.get("model", "gpt-4o-mini")
|
|
54
|
+
temperature = config.get("temperature", 0.0)
|
|
55
|
+
|
|
56
|
+
messages = self._build_classification_prompt(instruction, options, query)
|
|
57
|
+
|
|
58
|
+
client = get_llm_client()
|
|
59
|
+
response = await client.chat(
|
|
60
|
+
messages=messages,
|
|
61
|
+
model=model,
|
|
62
|
+
temperature=temperature,
|
|
63
|
+
stream=False,
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
result_text = ""
|
|
67
|
+
if response.choices:
|
|
68
|
+
result_text = response.choices[0].message.content or ""
|
|
69
|
+
|
|
70
|
+
if response.usage:
|
|
71
|
+
context.add_usage(
|
|
72
|
+
Usage(
|
|
73
|
+
prompt_tokens=response.usage.prompt_tokens or 0,
|
|
74
|
+
completion_tokens=response.usage.completion_tokens or 0,
|
|
75
|
+
total_tokens=response.usage.total_tokens or 0,
|
|
76
|
+
)
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
# 尝试解析为 JSON,确保返回 {"result": "<option>"} 格式
|
|
80
|
+
try:
|
|
81
|
+
parsed = _json.loads(result_text)
|
|
82
|
+
if isinstance(parsed, dict) and "result" in parsed:
|
|
83
|
+
return _json.dumps(parsed, ensure_ascii=False)
|
|
84
|
+
except (ValueError, TypeError):
|
|
85
|
+
pass
|
|
86
|
+
|
|
87
|
+
# 如果 LLM 返回的是纯文本选项,包装为标准格式
|
|
88
|
+
stripped = result_text.strip()
|
|
89
|
+
if stripped in options:
|
|
90
|
+
return _json.dumps({"result": stripped}, ensure_ascii=False)
|
|
91
|
+
|
|
92
|
+
# 兜底:返回原始文本
|
|
93
|
+
return _json.dumps({"result": stripped}, ensure_ascii=False)
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""LLM Chat 节点 — 单轮 LLM 调用(支持流式/非流式)"""
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any, AsyncIterator
|
|
6
|
+
from uuid import uuid4
|
|
7
|
+
|
|
8
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
9
|
+
|
|
10
|
+
from ...client import get_llm_client
|
|
11
|
+
from .. import event
|
|
12
|
+
from ..context import Usage
|
|
13
|
+
from .base import NodeHandler
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
if TYPE_CHECKING:
|
|
17
|
+
from ..context import FlowContext
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class _SafeFormatDict(dict):
|
|
21
|
+
"""安全的模板变量替换,缺失 key 时保留原始占位符"""
|
|
22
|
+
|
|
23
|
+
def __missing__(self, key: str) -> str:
|
|
24
|
+
return f"{{{key}}}"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class LLMChatNodeHandler(NodeHandler):
    """Single-turn LLM chat node.

    Difference from the agent node:
    - agent = multi-turn conversation + tool-use loop + isolated message history
    - llm_chat = single prompt -> response, stateless and lighter weight
    """

    node_type = "llm_chat"

    def _build_prompt(self, config: dict, resolved_inputs: dict[str, Any]) -> str:
        """Substitute resolved input values for ``{var}`` placeholders in ``config.prompt``.

        Non-string values are stringified; placeholders with no matching input
        are left intact via _SafeFormatDict.
        """
        template = config.get("prompt", "")
        format_dict = _SafeFormatDict({k: str(v) if not isinstance(v, str) else v for k, v in resolved_inputs.items()})
        return template.format_map(format_dict)

    async def execute(self, node: dict, context: "FlowContext") -> Any:
        """Run one non-streaming chat completion and return the response text.

        Reads ``model`` (default "gpt-4o"), ``temperature`` (default 0.7),
        ``max_tokens`` and optional ``system_prompt`` from the node config.
        Records token usage on the context when the response reports it, and
        maps ``{"result": <text>}`` through the configured outputs.
        """
        config = node.get("config", {})
        resolved_inputs = self.resolve_inputs(config, context)
        prompt_text = self._build_prompt(config, resolved_inputs)

        model = config.get("model", "gpt-4o")
        temperature = config.get("temperature", 0.7)
        max_tokens = config.get("max_tokens")

        client = get_llm_client()
        messages: list[ChatCompletionMessageParam] = [{"role": "user", "content": prompt_text}]

        # Prepend the system prompt, if configured, ahead of the user message.
        system_prompt = config.get("system_prompt")
        if system_prompt:
            messages.insert(0, {"role": "system", "content": system_prompt})

        response = await client.chat(
            messages=messages,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=False,
        )

        result_text = ""
        if response.choices:
            result_text = response.choices[0].message.content or ""

        if response.usage:
            context.add_usage(
                Usage(
                    prompt_tokens=response.usage.prompt_tokens or 0,
                    completion_tokens=response.usage.completion_tokens or 0,
                    total_tokens=response.usage.total_tokens or 0,
                )
            )

        result = {"result": result_text}
        self.map_outputs(config, context, result)
        return result_text

    async def stream(self, node: dict, context: "FlowContext", node_id: str) -> AsyncIterator[dict[str, Any]]:
        """Yield flow events for this node, streaming LLM deltas when ``config.stream`` is true.

        Non-streaming configs delegate to execute() wrapped in step events.
        Streaming configs emit text_message_start / content deltas /
        text_message_end between the step events, and store the concatenated
        text as the node result.
        """
        config = node.get("config", {})
        use_stream = config.get("stream", False)

        if not use_stream:
            # Non-streaming: delegate to the default execute path.
            step_name = self.get_step_name(node, node_id)
            yield event.step_started(step_name=step_name)
            result = await self.execute(node, context)
            context.set_node_result(node_id, result)
            yield event.step_finished(step_name=step_name)
            return

        # Streaming output
        step_name = self.get_step_name(node, node_id)
        yield event.step_started(step_name=step_name)

        resolved_inputs = self.resolve_inputs(config, context)
        prompt_text = self._build_prompt(config, resolved_inputs)

        model = config.get("model", "gpt-4o")
        temperature = config.get("temperature", 0.7)
        max_tokens = config.get("max_tokens")

        client = get_llm_client()
        messages: list[ChatCompletionMessageParam] = [{"role": "user", "content": prompt_text}]

        system_prompt = config.get("system_prompt")
        if system_prompt:
            messages.insert(0, {"role": "system", "content": system_prompt})

        msg_id = context.message_id or str(uuid4())
        yield event.text_message_start(message_id=msg_id, role="assistant")

        content_parts: list[str] = []
        stream_iter = await client.chat(
            messages=messages,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=True,
        )

        async for chunk in stream_iter:
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta
            if delta.content:
                content_parts.append(delta.content)
                yield event.text_message_content(message_id=msg_id, delta=delta.content)

            # NOTE(review): usage is only captured when it rides on the final
            # chunk that also carries a finish_reason; some providers send
            # usage in a trailing choices-less chunk, which the `continue`
            # above would skip — confirm against the client's behavior.
            if chunk.choices[0].finish_reason and chunk.usage:
                context.add_usage(
                    Usage(
                        prompt_tokens=chunk.usage.prompt_tokens or 0,
                        completion_tokens=chunk.usage.completion_tokens or 0,
                        total_tokens=chunk.usage.total_tokens or 0,
                    )
                )

        yield event.text_message_end(message_id=msg_id)

        result_text = "".join(content_parts)
        result = {"result": result_text}
        self.map_outputs(config, context, result)
        context.set_node_result(node_id, result_text)

        yield event.step_finished(step_name=step_name)
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""LLM Embed 节点 — 文本向量化"""
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any
|
|
6
|
+
|
|
7
|
+
from ...client import get_llm_client
|
|
8
|
+
from .base import NodeHandler
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from ..context import FlowContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class LLMEmbedNodeHandler(NodeHandler):
    """Text embedding node.

    Input:  ``texts`` — a single string or a list of strings.
    Output: ``{"embeddings": [[0.1, 0.2, ...], ...]}``
    """

    node_type = "llm_embed"

    async def execute(self, node: dict, context: "FlowContext") -> Any:
        cfg = node.get("config", {})
        inputs = self.resolve_inputs(cfg, context)

        # Normalise a bare string into a one-element list before embedding.
        raw_texts = inputs.get("texts", [])
        text_list = [raw_texts] if isinstance(raw_texts, str) else raw_texts

        embedding_model = cfg.get("model", "bge-m3")

        vectors = await get_llm_client().embed(texts=text_list, model=embedding_model)

        output = {"embeddings": vectors}
        self.map_outputs(cfg, context, output)
        return output
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""LLM Rerank 节点 — 文档重排序"""
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any
|
|
6
|
+
|
|
7
|
+
from ...client import get_llm_client
|
|
8
|
+
from .base import NodeHandler
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from ..context import FlowContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class LLMRerankNodeHandler(NodeHandler):
    """Document reranking node.

    Input:  ``query`` (string) and ``documents`` (list of strings).
    Output: ``{"results": [{"index": 0, "score": 0.95, "text": "..."}, ...]}``
    """

    node_type = "llm_rerank"

    async def execute(self, node: dict, context: "FlowContext") -> Any:
        cfg = node.get("config", {})
        inputs = self.resolve_inputs(cfg, context)

        search_query = inputs.get("query", "")
        docs = inputs.get("documents", [])
        # Accept a single document string as a one-element list.
        if isinstance(docs, str):
            docs = [docs]

        rerank_model = cfg.get("model", "bge-reranker-v2-m3")
        limit = cfg.get("top_n", 10)

        ranked = await get_llm_client().rerank(
            query=search_query,
            documents=docs,
            model=rerank_model,
            top_n=limit,
        )

        # ``ranked`` is a list of (index, score, text) tuples from the client.
        formatted = [
            {"index": position, "score": relevance, "text": body}
            for position, relevance, body in ranked
        ]

        output = {"results": formatted}
        self.map_outputs(cfg, context, output)
        return output
|
agstack-1.5.0/agstack/llm/flow/sandbox.py → agstack-1.6.0/agstack/llm/flow/nodes/python_node.py
RENAMED
|
@@ -1,16 +1,26 @@
|
|
|
1
|
-
# Copyright (c) 2020-
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
2
|
|
|
3
|
-
"""Python
|
|
3
|
+
"""Python 沙箱节点处理器 — 从 sandbox.py 迁入"""
|
|
4
|
+
|
|
5
|
+
import json as _json
|
|
6
|
+
from typing import TYPE_CHECKING, Any
|
|
7
|
+
|
|
8
|
+
from .base import NodeHandler
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from ..context import FlowContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# ── 沙箱执行(原 sandbox.py) ──
|
|
4
16
|
|
|
5
17
|
import builtins
|
|
6
|
-
from typing import Any
|
|
7
18
|
|
|
8
19
|
from RestrictedPython import compile_restricted, safe_globals
|
|
9
20
|
from RestrictedPython.Eval import default_guarded_getitem, default_guarded_getiter
|
|
10
21
|
from RestrictedPython.Guards import guarded_unpack_sequence, safer_getattr
|
|
11
22
|
|
|
12
23
|
|
|
13
|
-
# 白名单内置模块
|
|
14
24
|
_ALLOWED_MODULES = frozenset(
|
|
15
25
|
{
|
|
16
26
|
"json",
|
|
@@ -66,3 +76,20 @@ def execute_python_node(code: str, inputs: dict[str, Any]) -> dict[str, Any]:
|
|
|
66
76
|
raise TypeError(f"main() must return a dict, got {type(result).__name__}")
|
|
67
77
|
|
|
68
78
|
return result
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
# ── NodeHandler ──
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class PythonNodeHandler(NodeHandler):
    """Node that runs user-supplied code in the restricted Python sandbox."""

    node_type = "python"

    async def execute(self, node: dict, context: "FlowContext") -> Any:
        cfg = node.get("config", {})
        inputs = self.resolve_inputs(cfg, context)
        source = cfg.get("code", "")

        sandbox_output = execute_python_node(source, inputs)
        self.map_outputs(cfg, context, sandbox_output)

        # Serialise the sandbox dict so downstream nodes receive a JSON string.
        return _json.dumps(sandbox_output, ensure_ascii=False)
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# Copyright (c) 2020-2026 XtraVisions, All rights reserved.
|
|
2
|
+
|
|
3
|
+
"""Tool 节点处理器 — 从 flow.py 提取"""
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any
|
|
6
|
+
|
|
7
|
+
from ..exceptions import FlowError
|
|
8
|
+
from ..registry import registry
|
|
9
|
+
from .base import NodeHandler
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from ..context import FlowContext
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class ToolNodeHandler(NodeHandler):
    """Tool node: look the tool up in the registry, then ``tool.run(context)``."""

    node_type = "tool"

    def _set_parameters(self, config: dict, context: "FlowContext") -> None:
        # Copy configured parameters into the context, resolving string
        # values as context references first.
        for name, raw in config.get("parameters", {}).items():
            value = context.resolve_reference(raw) if isinstance(raw, str) else raw
            context.set_variable(name, value)

    def _create_tool(self, config: dict):
        # Instantiate the named tool, failing fast on a missing or unknown name.
        tool_name = config.get("tool_name")
        if not tool_name:
            raise FlowError("MISSING_TOOL_NAME", 400)
        tool = registry.create_tool(tool_name)
        if not tool:
            raise FlowError("TOOL_NOT_FOUND", 404, {"tool_name": tool_name})
        return tool

    async def execute(self, node: dict, context: "FlowContext") -> Any:
        cfg = node.get("config", {})
        self._set_parameters(cfg, context)
        return await self._create_tool(cfg).run(context)
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: agstack
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.6.0
|
|
4
4
|
Summary: Production-ready toolkit for building FastAPI and LLM applications
|
|
5
5
|
Author-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
6
6
|
Maintainer-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
|
|
@@ -44,6 +44,15 @@ agstack/llm/flow/registry.py
|
|
|
44
44
|
agstack/llm/flow/sandbox.py
|
|
45
45
|
agstack/llm/flow/state.py
|
|
46
46
|
agstack/llm/flow/tool.py
|
|
47
|
+
agstack/llm/flow/nodes/__init__.py
|
|
48
|
+
agstack/llm/flow/nodes/agent_node.py
|
|
49
|
+
agstack/llm/flow/nodes/base.py
|
|
50
|
+
agstack/llm/flow/nodes/detect_node.py
|
|
51
|
+
agstack/llm/flow/nodes/llm_chat_node.py
|
|
52
|
+
agstack/llm/flow/nodes/llm_embed_node.py
|
|
53
|
+
agstack/llm/flow/nodes/llm_rerank_node.py
|
|
54
|
+
agstack/llm/flow/nodes/python_node.py
|
|
55
|
+
agstack/llm/flow/nodes/tool_node.py
|
|
47
56
|
agstack/security/__init__.py
|
|
48
57
|
agstack/security/casbin.py
|
|
49
58
|
agstack/security/crypt.py
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|