agstack 1.6.0__tar.gz → 1.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {agstack-1.6.0 → agstack-1.7.0}/PKG-INFO +1 -1
  2. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/agent.py +16 -11
  3. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/context.py +23 -25
  4. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/flow.py +35 -61
  5. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/agent_node.py +6 -13
  6. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/base.py +2 -10
  7. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/detect_node.py +12 -12
  8. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/llm_chat_node.py +16 -16
  9. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/llm_embed_node.py +0 -1
  10. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/llm_rerank_node.py +0 -1
  11. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/python_node.py +15 -6
  12. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/tool_node.py +3 -9
  13. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/tool.py +9 -10
  14. {agstack-1.6.0 → agstack-1.7.0}/agstack.egg-info/PKG-INFO +1 -1
  15. {agstack-1.6.0 → agstack-1.7.0}/agstack.egg-info/SOURCES.txt +2 -1
  16. {agstack-1.6.0 → agstack-1.7.0}/pyproject.toml +2 -1
  17. agstack-1.7.0/tests/test_flow_io.py +435 -0
  18. {agstack-1.6.0 → agstack-1.7.0}/LICENSE +0 -0
  19. {agstack-1.6.0 → agstack-1.7.0}/README.md +0 -0
  20. {agstack-1.6.0 → agstack-1.7.0}/agstack/__init__.py +0 -0
  21. {agstack-1.6.0 → agstack-1.7.0}/agstack/config/__init__.py +0 -0
  22. {agstack-1.6.0 → agstack-1.7.0}/agstack/config/logger.py +0 -0
  23. {agstack-1.6.0 → agstack-1.7.0}/agstack/config/manager.py +0 -0
  24. {agstack-1.6.0 → agstack-1.7.0}/agstack/config/types.py +0 -0
  25. {agstack-1.6.0 → agstack-1.7.0}/agstack/contexts.py +0 -0
  26. {agstack-1.6.0 → agstack-1.7.0}/agstack/decorators.py +0 -0
  27. {agstack-1.6.0 → agstack-1.7.0}/agstack/events.py +0 -0
  28. {agstack-1.6.0 → agstack-1.7.0}/agstack/exceptions.py +0 -0
  29. {agstack-1.6.0 → agstack-1.7.0}/agstack/fastapi/__init__.py +0 -0
  30. {agstack-1.6.0 → agstack-1.7.0}/agstack/fastapi/exception.py +0 -0
  31. {agstack-1.6.0 → agstack-1.7.0}/agstack/fastapi/middleware.py +0 -0
  32. {agstack-1.6.0 → agstack-1.7.0}/agstack/fastapi/offline.py +0 -0
  33. {agstack-1.6.0 → agstack-1.7.0}/agstack/fastapi/sse.py +0 -0
  34. {agstack-1.6.0 → agstack-1.7.0}/agstack/infra/db/__init__.py +0 -0
  35. {agstack-1.6.0 → agstack-1.7.0}/agstack/infra/es/__init__.py +0 -0
  36. {agstack-1.6.0 → agstack-1.7.0}/agstack/infra/kg/__init__.py +0 -0
  37. {agstack-1.6.0 → agstack-1.7.0}/agstack/infra/mq/__init__.py +0 -0
  38. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/__init__.py +0 -0
  39. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/client.py +0 -0
  40. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/__init__.py +0 -0
  41. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/event.py +0 -0
  42. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/exceptions.py +0 -0
  43. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/factory.py +0 -0
  44. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/loader.py +0 -0
  45. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/nodes/__init__.py +0 -0
  46. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/records.py +0 -0
  47. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/registry.py +0 -0
  48. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/sandbox.py +0 -0
  49. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/flow/state.py +0 -0
  50. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/prompts.py +0 -0
  51. {agstack-1.6.0 → agstack-1.7.0}/agstack/llm/token.py +0 -0
  52. {agstack-1.6.0 → agstack-1.7.0}/agstack/registry.py +0 -0
  53. {agstack-1.6.0 → agstack-1.7.0}/agstack/schema.py +0 -0
  54. {agstack-1.6.0 → agstack-1.7.0}/agstack/security/__init__.py +0 -0
  55. {agstack-1.6.0 → agstack-1.7.0}/agstack/security/casbin.py +0 -0
  56. {agstack-1.6.0 → agstack-1.7.0}/agstack/security/crypt.py +0 -0
  57. {agstack-1.6.0 → agstack-1.7.0}/agstack/status.py +0 -0
  58. {agstack-1.6.0 → agstack-1.7.0}/agstack.egg-info/dependency_links.txt +0 -0
  59. {agstack-1.6.0 → agstack-1.7.0}/agstack.egg-info/requires.txt +0 -0
  60. {agstack-1.6.0 → agstack-1.7.0}/agstack.egg-info/top_level.txt +0 -0
  61. {agstack-1.6.0 → agstack-1.7.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agstack
3
- Version: 1.6.0
3
+ Version: 1.7.0
4
4
  Summary: Production-ready toolkit for building FastAPI and LLM applications
5
5
  Author-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
6
6
  Maintainer-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
@@ -64,23 +64,29 @@ class Agent:
64
64
  return tool
65
65
  return None
66
66
 
67
- async def run(self, context: "FlowContext") -> str:
67
+ async def run(self, context: "FlowContext", inputs: dict[str, Any] | None = None) -> dict[str, Any]:
68
68
  """执行 Agent 逻辑"""
69
69
  content_parts = []
70
- async for evt in self.stream(context):
70
+ async for evt in self.stream(context, inputs):
71
71
  # AG-UI 事件格式
72
72
  if isinstance(evt, dict):
73
73
  if evt.get("type") == EventType.TEXT_MESSAGE_CONTENT:
74
74
  content_parts.append(evt.get("delta", ""))
75
75
  elif evt.get("type") == EventType.RUN_ERROR:
76
76
  raise FlowError("AGENT_EXECUTION_FAILED", 500, {"error": evt.get("message")})
77
- return "".join(content_parts)
77
+ return {"result": "".join(content_parts)}
78
78
 
79
- async def stream(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
79
+ async def stream(
80
+ self, context: "FlowContext", inputs: dict[str, Any] | None = None
81
+ ) -> AsyncIterator[dict[str, Any]]:
80
82
  """流式执行 Agent,输出 AG-UI 标准事件"""
81
83
 
82
- # 输入来源:优先 input(A2A 传入),回退到 query
83
- user_input = context.get_variable("input") or context.get_variable("query", "")
84
+ # 输入来源:优先 inputs 参数,回退到 context.variables
85
+ user_input = ""
86
+ if inputs:
87
+ user_input = inputs.get("input", "")
88
+ if not user_input:
89
+ user_input = context.get_variable("input") or context.get_variable("query", "")
84
90
  msg_id = context.message_id or str(uuid4())
85
91
 
86
92
  # 添加用户消息(scoped by agent name)
@@ -214,7 +220,7 @@ class Agent:
214
220
  # 如果没有工具调用,结束循环
215
221
  if not tool_calls:
216
222
  # 存储结果供 Flow/A2A 使用
217
- context.set_node_result(self.name, assistant_content)
223
+ context.set_output(self.name, {"result": assistant_content})
218
224
  # AG-UI: TEXT_MESSAGE_END
219
225
  yield event.text_message_end(message_id=msg_id)
220
226
  return
@@ -237,15 +243,14 @@ class Agent:
237
243
  )
238
244
  continue
239
245
 
240
- # 解析 LLM 返回的工具参数并注入 context
246
+ # 解析 LLM 返回的工具参数
241
247
  try:
242
248
  tool_args = json.loads(tool_call["arguments"]) if tool_call["arguments"] else {}
243
249
  except json.JSONDecodeError:
244
250
  tool_args = {}
245
- context.update_variables(tool_args)
246
251
 
247
- # 执行工具
248
- result = await tool.execute_async(context)
252
+ # 执行工具(传入 LLM 解析的参数作为 inputs)
253
+ result = await tool.execute_async(context, tool_args)
249
254
 
250
255
  # 保存工具结果
251
256
  result_content = json.dumps(result.result) if result.success else json.dumps({"error": result.error})
@@ -50,7 +50,7 @@ class FlowContext:
50
50
  message_id: str | None = None
51
51
 
52
52
  # 图执行状态
53
- node_results: dict[str, Any] = field(default_factory=dict)
53
+ outputs: dict[str, Any] = field(default_factory=dict)
54
54
  current_node: str | None = None
55
55
 
56
56
  # 执行记录(可选)
@@ -115,33 +115,31 @@ class FlowContext:
115
115
  self.messages.clear()
116
116
  self.turn_count = 0
117
117
 
118
- def resolve_reference(self, ref: str) -> Any:
119
- """解析变量引用 {node@variable.field} 或 {node_id}"""
120
- if not isinstance(ref, str) or not ref.startswith("{"):
121
- return ref
118
+ def resolve_reference(self, ref: Any) -> Any:
119
+ """解析变量引用
122
120
 
123
- ref_content = ref[1:-1] # 移除 {}
124
- if "@" not in ref_content:
125
- # 先从 variables 查找,回退到 node_results
126
- result = self.variables.get(ref_content)
127
- if result is None:
128
- result = self.node_results.get(ref_content)
121
+ $o.node_id.field.subfield → context.outputs["node_id"]["field"]["subfield"]
122
+ $v.key → context.variables["key"]
123
+ 其他字符串 → 原样返回(字面值)
124
+ """
125
+ if not isinstance(ref, str):
126
+ return ref
127
+ if ref.startswith("$o."):
128
+ parts = ref[3:].split(".")
129
+ result = self.outputs.get(parts[0])
130
+ for part in parts[1:]:
131
+ if isinstance(result, dict):
132
+ result = result.get(part)
133
+ else:
134
+ result = getattr(result, part, None)
129
135
  return result
136
+ if ref.startswith("$v."):
137
+ return self.variables.get(ref[3:])
138
+ return ref
130
139
 
131
- node_id, var_path = ref_content.split("@", 1)
132
- result = self.node_results.get(node_id)
133
-
134
- # 支持嵌套字段访问 variable.field.subfield
135
- for field_name in var_path.split("."):
136
- if isinstance(result, dict):
137
- result = result.get(field_name)
138
- else:
139
- result = getattr(result, field_name, None)
140
- return result
141
-
142
- def set_node_result(self, node_id: str, result: Any):
143
- """设置节点执行结果"""
144
- self.node_results[node_id] = result
140
+ def set_output(self, node_id: str, result: Any):
141
+ """设置节点输出"""
142
+ self.outputs[node_id] = result
145
143
 
146
144
  def add_execution_record(self, task_id: str, status: str, **kwargs) -> None:
147
145
  """添加执行记录"""
@@ -3,7 +3,6 @@
3
3
  """Flow 定义和执行"""
4
4
 
5
5
  import asyncio
6
- import json as _json
7
6
  from dataclasses import dataclass, field
8
7
  from typing import TYPE_CHECKING, Any, AsyncIterator
9
8
  from uuid import uuid4
@@ -82,27 +81,22 @@ class Flow:
82
81
 
83
82
  @staticmethod
84
83
  def _extract_route_key(result: Any) -> str:
85
- """从节点执行结果中提取路由键。
84
+ """从节点输出 dict 中提取路由键。
86
85
 
87
- 支持 ``{"result": "qa"}`` 形式的 JSON 字符串,
88
- 以及纯字符串结果。
86
+ 节点输出 dict 中若包含 ``choice`` 字段,即为路由键。
87
+ 没有 ``choice`` 则默认 ``"done"``。
89
88
  """
90
- if not isinstance(result, str):
91
- return "done"
92
- try:
93
- parsed = _json.loads(result)
94
- if isinstance(parsed, dict) and "result" in parsed:
95
- return str(parsed["result"])
96
- except (ValueError, TypeError):
97
- pass
98
- return result or "done"
89
+ if isinstance(result, dict):
90
+ return str(result.get("choice", "done"))
91
+ return "done"
99
92
 
100
93
  # ── message 节点 ──
101
94
 
102
95
  async def _emit_message(self, node: dict, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
103
- """输出模板文本"""
96
+ """输出模板文本,支持 $v. 引用"""
104
97
  config = node.get("config", {})
105
98
  template = config.get("content", "")
99
+ # 用 variables 做 format_map 替换 {var} 占位符
106
100
  text = template.format_map(_SafeFormatDict(context.variables))
107
101
  msg_id = context.message_id or str(uuid4())
108
102
  yield event.text_message_start(message_id=msg_id, role="assistant")
@@ -180,7 +174,7 @@ class Flow:
180
174
  handler = self._node_handlers.get(node_type)
181
175
  if handler:
182
176
  result = await handler.execute(node, context)
183
- context.set_node_result(node_id, result)
177
+ context.set_output(node_id, result)
184
178
  else:
185
179
  raise NodeExecutionError("UNKNOWN_NODE_TYPE", args={"node_type": node_type})
186
180
  else:
@@ -197,7 +191,7 @@ class Flow:
197
191
  config = node.get("config", {})
198
192
  template = config.get("content", "")
199
193
  text = template.format_map(_SafeFormatDict(context.variables))
200
- context.set_node_result(current_node_id, text)
194
+ context.set_output(current_node_id, {"result": text})
201
195
  current_node_id = self._resolve_next_node(current_node_id, "done")
202
196
 
203
197
  elif node_type == "parallel":
@@ -213,25 +207,22 @@ class Flow:
213
207
  branch_handler = self._node_handlers.get(branch_type)
214
208
  if branch_handler:
215
209
  result = await branch_handler.execute(branch_node, context)
216
- context.set_node_result(branch_id, result)
210
+ context.set_output(branch_id, result)
217
211
 
218
212
  await asyncio.gather(*[_run_branch(bid) for bid in branches])
219
- context.set_node_result(current_node_id, "done")
213
+ context.set_output(current_node_id, {"choice": "done"})
220
214
  current_node_id = self._resolve_next_node(current_node_id, "done")
221
215
 
222
216
  elif node_type == "iteration":
223
217
  config = node.get("config", {})
224
218
  items_ref = config.get("items", "")
225
219
  items = context.resolve_reference(items_ref) if isinstance(items_ref, str) else items_ref
226
- if isinstance(items, str):
227
- items = _json.loads(items)
228
220
  if not isinstance(items, list):
229
221
  items = [items]
230
222
 
231
223
  item_var = config.get("item_variable", "item")
232
224
  index_var = config.get("index_variable", "index")
233
225
  body_nodes: list[str] = config.get("body", [])
234
- output_var = config.get("output_variable", "iteration_results")
235
226
  results: list[Any] = []
236
227
 
237
228
  for idx, item in enumerate(items):
@@ -245,12 +236,11 @@ class Flow:
245
236
  body_handler = self._node_handlers.get(body_type)
246
237
  if body_handler:
247
238
  body_result = await body_handler.execute(body_node, context)
248
- context.set_node_result(body_node_id, body_result)
239
+ context.set_output(body_node_id, body_result)
249
240
  if body_nodes:
250
- results.append(context.node_results.get(body_nodes[-1]))
241
+ results.append(context.outputs.get(body_nodes[-1]))
251
242
 
252
- context.set_variable(output_var, results)
253
- context.set_node_result(current_node_id, _json.dumps(results, ensure_ascii=False))
243
+ context.set_output(current_node_id, {"results": results})
254
244
  current_node_id = self._resolve_next_node(current_node_id, "done")
255
245
 
256
246
  elif node_type == "loop":
@@ -271,26 +261,20 @@ class Flow:
271
261
  body_handler = self._node_handlers.get(body_type)
272
262
  if body_handler:
273
263
  body_result = await body_handler.execute(body_node, context)
274
- context.set_node_result(body_node_id, body_result)
264
+ context.set_output(body_node_id, body_result)
275
265
  if condition_node_id:
276
- cond_result = context.node_results.get(condition_node_id, "")
277
- if isinstance(cond_result, str):
278
- try:
279
- parsed = _json.loads(cond_result)
280
- if isinstance(parsed, dict) and parsed.get("result") == break_cond:
281
- break
282
- except (ValueError, TypeError):
283
- if cond_result == break_cond:
284
- break
285
-
286
- context.set_node_result(current_node_id, "done")
266
+ cond_result = context.outputs.get(condition_node_id, {})
267
+ if isinstance(cond_result, dict) and cond_result.get("choice") == break_cond:
268
+ break
269
+
270
+ context.set_output(current_node_id, {"choice": "done"})
287
271
  current_node_id = self._resolve_next_node(current_node_id, "done")
288
272
 
289
273
  elif node_type in self._node_handlers:
290
274
  # 所有执行类节点统一分发
291
275
  handler = self._node_handlers[node_type]
292
276
  result = await handler.execute(node, context)
293
- context.set_node_result(current_node_id, result)
277
+ context.set_output(current_node_id, result)
294
278
  route_key = self._extract_route_key(result)
295
279
  current_node_id = self._resolve_next_node(current_node_id, route_key) or self._resolve_next_node(
296
280
  current_node_id, "done"
@@ -299,7 +283,7 @@ class Flow:
299
283
  else:
300
284
  raise NodeExecutionError("UNKNOWN_NODE_TYPE", args={"node_type": node_type})
301
285
 
302
- return context.node_results
286
+ return context.outputs
303
287
 
304
288
  async def stream(self, context: "FlowContext") -> AsyncIterator[dict[str, Any]]:
305
289
  """流式执行 Flow(输出 AG-UI 标准事件)"""
@@ -371,10 +355,10 @@ class Flow:
371
355
  branch_handler = self._node_handlers.get(branch_type)
372
356
  if branch_handler:
373
357
  result = await branch_handler.execute(branch_node, context)
374
- context.set_node_result(branch_id, result)
358
+ context.set_output(branch_id, result)
375
359
 
376
360
  await asyncio.gather(*[_exec_branch(bid) for bid in branches])
377
- context.set_node_result(current_node_id, "done")
361
+ context.set_output(current_node_id, {"choice": "done"})
378
362
  yield event.step_finished(step_name=f"parallel:{current_node_id}")
379
363
  current_node_id = self._resolve_next_node(current_node_id, "done")
380
364
 
@@ -382,15 +366,12 @@ class Flow:
382
366
  config = node.get("config", {})
383
367
  items_ref = config.get("items", "")
384
368
  items = context.resolve_reference(items_ref) if isinstance(items_ref, str) else items_ref
385
- if isinstance(items, str):
386
- items = _json.loads(items)
387
369
  if not isinstance(items, list):
388
370
  items = [items]
389
371
 
390
372
  item_var = config.get("item_variable", "item")
391
373
  index_var = config.get("index_variable", "index")
392
374
  body_nodes: list[str] = config.get("body", [])
393
- output_var = config.get("output_variable", "iteration_results")
394
375
  results: list[Any] = []
395
376
 
396
377
  yield event.step_started(step_name=f"iteration:{current_node_id}")
@@ -405,12 +386,11 @@ class Flow:
405
386
  body_handler = self._node_handlers.get(body_type)
406
387
  if body_handler:
407
388
  body_result = await body_handler.execute(body_node, context)
408
- context.set_node_result(body_node_id, body_result)
389
+ context.set_output(body_node_id, body_result)
409
390
  if body_nodes:
410
- results.append(context.node_results.get(body_nodes[-1]))
391
+ results.append(context.outputs.get(body_nodes[-1]))
411
392
 
412
- context.set_variable(output_var, results)
413
- context.set_node_result(current_node_id, _json.dumps(results, ensure_ascii=False))
393
+ context.set_output(current_node_id, {"results": results})
414
394
  yield event.step_finished(step_name=f"iteration:{current_node_id}")
415
395
  current_node_id = self._resolve_next_node(current_node_id, "done")
416
396
 
@@ -433,20 +413,14 @@ class Flow:
433
413
  body_handler = self._node_handlers.get(body_type)
434
414
  if body_handler:
435
415
  body_result = await body_handler.execute(body_node, context)
436
- context.set_node_result(body_node_id, body_result)
416
+ context.set_output(body_node_id, body_result)
437
417
  # 检查终止条件
438
418
  if condition_node_id:
439
- cond_result = context.node_results.get(condition_node_id, "")
440
- if isinstance(cond_result, str):
441
- try:
442
- parsed = _json.loads(cond_result)
443
- if isinstance(parsed, dict) and parsed.get("result") == break_cond:
444
- break
445
- except (ValueError, TypeError):
446
- if cond_result == break_cond:
447
- break
448
-
449
- context.set_node_result(current_node_id, "done")
419
+ cond_result = context.outputs.get(condition_node_id, {})
420
+ if isinstance(cond_result, dict) and cond_result.get("choice") == break_cond:
421
+ break
422
+
423
+ context.set_output(current_node_id, {"choice": "done"})
450
424
  yield event.step_finished(step_name=f"loop:{current_node_id}")
451
425
  current_node_id = self._resolve_next_node(current_node_id, "done")
452
426
 
@@ -454,7 +428,7 @@ class Flow:
454
428
  # 所有执行类节点统一分发
455
429
  async for evt in self._execute_node_with_retry(node, context, current_node_id):
456
430
  yield evt
457
- result = context.node_results.get(current_node_id, "")
431
+ result = context.outputs.get(current_node_id, {})
458
432
  route_key = self._extract_route_key(result)
459
433
  current_node_id = self._resolve_next_node(current_node_id, route_key) or self._resolve_next_node(
460
434
  current_node_id, "done"
@@ -15,16 +15,10 @@ if TYPE_CHECKING:
15
15
 
16
16
 
17
17
  class AgentNodeHandler(NodeHandler):
18
- """Agent 节点:通过 registry 查找 agent → ag.stream(context)"""
18
+ """Agent 节点:通过 registry 查找 agent → ag.stream(context, inputs)"""
19
19
 
20
20
  node_type = "agent"
21
21
 
22
- def _set_parameters(self, config: dict, context: "FlowContext") -> None:
23
- parameters = config.get("parameters", {})
24
- for key, value in parameters.items():
25
- resolved = context.resolve_reference(value) if isinstance(value, str) else value
26
- context.set_variable(key, resolved)
27
-
28
22
  def _create_agent(self, config: dict):
29
23
  agent_name = config.get("agent_name")
30
24
  if not agent_name:
@@ -36,20 +30,19 @@ class AgentNodeHandler(NodeHandler):
36
30
 
37
31
  async def execute(self, node: dict, context: "FlowContext") -> Any:
38
32
  config = node.get("config", {})
39
- self._set_parameters(config, context)
33
+ resolved = self.resolve_inputs(config, context)
40
34
  ag = self._create_agent(config)
41
- return await ag.run(context)
35
+ return await ag.run(context, inputs=resolved)
42
36
 
43
37
  async def stream(self, node: dict, context: "FlowContext", node_id: str) -> AsyncIterator[dict[str, Any]]:
44
38
  config = node.get("config", {})
45
39
  step_name = self.get_step_name(node, node_id)
46
40
 
47
41
  yield event.step_started(step_name=step_name)
48
- self._set_parameters(config, context)
42
+ resolved = self.resolve_inputs(config, context)
49
43
  ag = self._create_agent(config)
50
- async for evt in ag.stream(context):
44
+ async for evt in ag.stream(context, inputs=resolved):
51
45
  yield evt
52
46
  result = context.get_last_output(ag.name) or ""
53
- context.set_node_result(node_id, result)
54
- self.map_outputs(config, context, {"result": result})
47
+ context.set_output(node_id, {"result": result})
55
48
  yield event.step_finished(step_name=step_name)
@@ -31,14 +31,8 @@ class NodeHandler:
31
31
  inputs_spec = config.get("inputs", {})
32
32
  return {k: context.resolve_reference(v) if isinstance(v, str) else v for k, v in inputs_spec.items()}
33
33
 
34
- def map_outputs(self, config: dict, context: "FlowContext", result: dict) -> None:
35
- """将结果映射到 context.variables"""
36
- for key in config.get("outputs", {}):
37
- if isinstance(result, dict) and key in result:
38
- context.set_variable(key, result[key])
39
-
40
34
  async def execute(self, node: dict, context: "FlowContext") -> Any:
41
- """执行节点,返回结果(将存入 node_results
35
+ """执行节点,返回结果(将存入 context.outputs
42
36
 
43
37
  子类必须实现此方法。
44
38
  """
@@ -53,7 +47,5 @@ class NodeHandler:
53
47
  step_name = self.get_step_name(node, node_id)
54
48
  yield event.step_started(step_name=step_name)
55
49
  result = await self.execute(node, context)
56
- context.set_node_result(node_id, result)
57
- config = node.get("config", {})
58
- self.map_outputs(config, context, result)
50
+ context.set_output(node_id, result)
59
51
  yield event.step_finished(step_name=step_name)
@@ -2,7 +2,6 @@
2
2
 
3
3
  """Detect 节点 — 分类/检测,输出路由键"""
4
4
 
5
- import json as _json
6
5
  from typing import TYPE_CHECKING, Any
7
6
 
8
7
  from openai.types.chat import ChatCompletionMessageParam
@@ -19,10 +18,10 @@ if TYPE_CHECKING:
19
18
  class DetectNodeHandler(NodeHandler):
20
19
  """分类/检测节点
21
20
 
22
- 对输入文本进行分类,输出路由键。结果直接用于 _extract_route_key() 路由。
21
+ 对输入文本进行分类,输出路由键。结果 dict 中的 ``choice`` 字段用于边路由。
23
22
 
24
23
  输入:query(待检测文本)+ instruction + options
25
- 输出:{"result": "<option>"} 的 JSON 字符串
24
+ 输出:{"choice": "<option>"}
26
25
  """
27
26
 
28
27
  node_type = "detect"
@@ -76,18 +75,19 @@ class DetectNodeHandler(NodeHandler):
76
75
  )
77
76
  )
78
77
 
79
- # 尝试解析为 JSON,确保返回 {"result": "<option>"} 格式
78
+ # LLM 响应中提取选项
79
+ import json as _json
80
+
81
+ option = result_text.strip()
80
82
  try:
81
- parsed = _json.loads(result_text)
83
+ parsed = _json.loads(option)
82
84
  if isinstance(parsed, dict) and "result" in parsed:
83
- return _json.dumps(parsed, ensure_ascii=False)
85
+ option = str(parsed["result"])
84
86
  except (ValueError, TypeError):
85
87
  pass
86
88
 
87
- # 如果 LLM 返回的是纯文本选项,包装为标准格式
88
- stripped = result_text.strip()
89
- if stripped in options:
90
- return _json.dumps({"result": stripped}, ensure_ascii=False)
89
+ # 如果提取的选项在合法列表中,使用它;否则用原始文本
90
+ if option not in options:
91
+ option = result_text.strip()
91
92
 
92
- # 兜底:返回原始文本
93
- return _json.dumps({"result": stripped}, ensure_ascii=False)
93
+ return {"choice": option}
@@ -2,6 +2,7 @@
2
2
 
3
3
  """LLM Chat 节点 — 单轮 LLM 调用(支持流式/非流式)"""
4
4
 
5
+ import json as _json
5
6
  from typing import TYPE_CHECKING, Any, AsyncIterator
6
7
  from uuid import uuid4
7
8
 
@@ -34,16 +35,17 @@ class LLMChatNodeHandler(NodeHandler):
34
35
 
35
36
  node_type = "llm_chat"
36
37
 
37
- def _build_prompt(self, config: dict, resolved_inputs: dict[str, Any]) -> str:
38
- """ config.prompt 中的 {var} 占位符替换为 resolved 的输入值"""
39
- template = config.get("prompt", "")
40
- format_dict = _SafeFormatDict({k: str(v) if not isinstance(v, str) else v for k, v in resolved_inputs.items()})
38
+ def _build_prompt(self, template: str, resolved_inputs: dict[str, Any]) -> str:
39
+ """将模板中的 {var} 占位符替换为 resolved 的输入值"""
40
+ format_dict = _SafeFormatDict(
41
+ {k: v if isinstance(v, str) else _json.dumps(v, ensure_ascii=False) for k, v in resolved_inputs.items()}
42
+ )
41
43
  return template.format_map(format_dict)
42
44
 
43
45
  async def execute(self, node: dict, context: "FlowContext") -> Any:
44
46
  config = node.get("config", {})
45
47
  resolved_inputs = self.resolve_inputs(config, context)
46
- prompt_text = self._build_prompt(config, resolved_inputs)
48
+ prompt_text = self._build_prompt(config.get("prompt", ""), resolved_inputs)
47
49
 
48
50
  model = config.get("model", "gpt-4o")
49
51
  temperature = config.get("temperature", 0.7)
@@ -52,10 +54,11 @@ class LLMChatNodeHandler(NodeHandler):
52
54
  client = get_llm_client()
53
55
  messages: list[ChatCompletionMessageParam] = [{"role": "user", "content": prompt_text}]
54
56
 
55
- # 如果有 system_prompt,放在前面
57
+ # 如果有 system_prompt,支持变量替换并放在前面
56
58
  system_prompt = config.get("system_prompt")
57
59
  if system_prompt:
58
- messages.insert(0, {"role": "system", "content": system_prompt})
60
+ system_text = self._build_prompt(system_prompt, resolved_inputs)
61
+ messages.insert(0, {"role": "system", "content": system_text})
59
62
 
60
63
  response = await client.chat(
61
64
  messages=messages,
@@ -78,9 +81,7 @@ class LLMChatNodeHandler(NodeHandler):
78
81
  )
79
82
  )
80
83
 
81
- result = {"result": result_text}
82
- self.map_outputs(config, context, result)
83
- return result_text
84
+ return {"result": result_text}
84
85
 
85
86
  async def stream(self, node: dict, context: "FlowContext", node_id: str) -> AsyncIterator[dict[str, Any]]:
86
87
  config = node.get("config", {})
@@ -91,7 +92,7 @@ class LLMChatNodeHandler(NodeHandler):
91
92
  step_name = self.get_step_name(node, node_id)
92
93
  yield event.step_started(step_name=step_name)
93
94
  result = await self.execute(node, context)
94
- context.set_node_result(node_id, result)
95
+ context.set_output(node_id, result)
95
96
  yield event.step_finished(step_name=step_name)
96
97
  return
97
98
 
@@ -100,7 +101,7 @@ class LLMChatNodeHandler(NodeHandler):
100
101
  yield event.step_started(step_name=step_name)
101
102
 
102
103
  resolved_inputs = self.resolve_inputs(config, context)
103
- prompt_text = self._build_prompt(config, resolved_inputs)
104
+ prompt_text = self._build_prompt(config.get("prompt", ""), resolved_inputs)
104
105
 
105
106
  model = config.get("model", "gpt-4o")
106
107
  temperature = config.get("temperature", 0.7)
@@ -111,7 +112,8 @@ class LLMChatNodeHandler(NodeHandler):
111
112
 
112
113
  system_prompt = config.get("system_prompt")
113
114
  if system_prompt:
114
- messages.insert(0, {"role": "system", "content": system_prompt})
115
+ system_text = self._build_prompt(system_prompt, resolved_inputs)
116
+ messages.insert(0, {"role": "system", "content": system_text})
115
117
 
116
118
  msg_id = context.message_id or str(uuid4())
117
119
  yield event.text_message_start(message_id=msg_id, role="assistant")
@@ -145,8 +147,6 @@ class LLMChatNodeHandler(NodeHandler):
145
147
  yield event.text_message_end(message_id=msg_id)
146
148
 
147
149
  result_text = "".join(content_parts)
148
- result = {"result": result_text}
149
- self.map_outputs(config, context, result)
150
- context.set_node_result(node_id, result_text)
150
+ context.set_output(node_id, {"result": result_text})
151
151
 
152
152
  yield event.step_finished(step_name=step_name)
@@ -35,5 +35,4 @@ class LLMEmbedNodeHandler(NodeHandler):
35
35
  embeddings = await client.embed(texts=texts, model=model)
36
36
 
37
37
  result = {"embeddings": embeddings}
38
- self.map_outputs(config, context, result)
39
38
  return result
@@ -45,5 +45,4 @@ class LLMRerankNodeHandler(NodeHandler):
45
45
  results = [{"index": idx, "score": score, "text": text} for idx, score, text in raw_results]
46
46
 
47
47
  result = {"results": results}
48
- self.map_outputs(config, context, result)
49
48
  return result
@@ -2,7 +2,6 @@
2
2
 
3
3
  """Python 沙箱节点处理器 — 从 sandbox.py 迁入"""
4
4
 
5
- import json as _json
6
5
  from typing import TYPE_CHECKING, Any
7
6
 
8
7
  from .base import NodeHandler
@@ -16,7 +15,7 @@ if TYPE_CHECKING:
16
15
 
17
16
  import builtins
18
17
 
19
- from RestrictedPython import compile_restricted, safe_globals
18
+ from RestrictedPython import compile_restricted, safe_globals, utility_builtins
20
19
  from RestrictedPython.Eval import default_guarded_getitem, default_guarded_getiter
21
20
  from RestrictedPython.Guards import guarded_unpack_sequence, safer_getattr
22
21
 
@@ -45,6 +44,11 @@ def _safe_import(name: str, *args: Any, **kwargs: Any) -> Any:
45
44
  return _builtins_import(name, *args, **kwargs)
46
45
 
47
46
 
47
+ def _full_write_guard(ob: Any) -> Any:
48
+ """允许对 list/dict/set 等可变容器的写操作"""
49
+ return ob
50
+
51
+
48
52
  def execute_python_node(code: str, inputs: dict[str, Any]) -> dict[str, Any]:
49
53
  """在 RestrictedPython 沙箱中执行用户代码
50
54
 
@@ -62,7 +66,14 @@ def execute_python_node(code: str, inputs: dict[str, Any]) -> dict[str, Any]:
62
66
  glb["_getiter_"] = default_guarded_getiter
63
67
  glb["_unpack_sequence_"] = guarded_unpack_sequence
64
68
  glb["_getattr_"] = safer_getattr
65
- glb["__builtins__"] = {**glb["__builtins__"], "__import__": _safe_import}
69
+ glb["_write_"] = _full_write_guard
70
+ glb["__builtins__"] = {
71
+ **glb["__builtins__"],
72
+ **utility_builtins,
73
+ "list": list,
74
+ "dict": dict,
75
+ "__import__": _safe_import,
76
+ }
66
77
 
67
78
  loc: dict[str, Any] = {}
68
79
  exec(byte_code, glb, loc) # noqa: S102
@@ -90,6 +101,4 @@ class PythonNodeHandler(NodeHandler):
90
101
  config = node.get("config", {})
91
102
  resolved_inputs = self.resolve_inputs(config, context)
92
103
  code_str = config.get("code", "")
93
- py_result = execute_python_node(code_str, resolved_inputs)
94
- self.map_outputs(config, context, py_result)
95
- return _json.dumps(py_result, ensure_ascii=False)
104
+ return execute_python_node(code_str, resolved_inputs)