agstack 1.8.4__tar.gz → 1.10.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {agstack-1.8.4 → agstack-1.10.0}/PKG-INFO +9 -10
  2. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/__init__.py +4 -0
  3. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/detect_node.py +6 -4
  4. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/llm_chat_node.py +10 -6
  5. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/llm_embed_node.py +1 -1
  6. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/llm_rerank_node.py +3 -2
  7. agstack-1.10.0/agstack/llm/flow/nodes/subflow_node.py +74 -0
  8. agstack-1.10.0/agstack/llm/flow/nodes/switch_node.py +41 -0
  9. {agstack-1.8.4 → agstack-1.10.0}/agstack.egg-info/PKG-INFO +9 -10
  10. {agstack-1.8.4 → agstack-1.10.0}/agstack.egg-info/SOURCES.txt +4 -1
  11. agstack-1.10.0/agstack.egg-info/requires.txt +17 -0
  12. {agstack-1.8.4 → agstack-1.10.0}/pyproject.toml +13 -14
  13. {agstack-1.8.4 → agstack-1.10.0}/tests/test_flow_io.py +225 -0
  14. agstack-1.10.0/tests/test_flow_switch_subflow.py +340 -0
  15. agstack-1.8.4/agstack.egg-info/requires.txt +0 -18
  16. {agstack-1.8.4 → agstack-1.10.0}/LICENSE +0 -0
  17. {agstack-1.8.4 → agstack-1.10.0}/README.md +0 -0
  18. {agstack-1.8.4 → agstack-1.10.0}/agstack/__init__.py +0 -0
  19. {agstack-1.8.4 → agstack-1.10.0}/agstack/config/__init__.py +0 -0
  20. {agstack-1.8.4 → agstack-1.10.0}/agstack/config/logger.py +0 -0
  21. {agstack-1.8.4 → agstack-1.10.0}/agstack/config/manager.py +0 -0
  22. {agstack-1.8.4 → agstack-1.10.0}/agstack/config/types.py +0 -0
  23. {agstack-1.8.4 → agstack-1.10.0}/agstack/contexts.py +0 -0
  24. {agstack-1.8.4 → agstack-1.10.0}/agstack/decorators.py +0 -0
  25. {agstack-1.8.4 → agstack-1.10.0}/agstack/events.py +0 -0
  26. {agstack-1.8.4 → agstack-1.10.0}/agstack/exceptions.py +0 -0
  27. {agstack-1.8.4 → agstack-1.10.0}/agstack/fastapi/__init__.py +0 -0
  28. {agstack-1.8.4 → agstack-1.10.0}/agstack/fastapi/exception.py +0 -0
  29. {agstack-1.8.4 → agstack-1.10.0}/agstack/fastapi/middleware.py +0 -0
  30. {agstack-1.8.4 → agstack-1.10.0}/agstack/fastapi/offline.py +0 -0
  31. {agstack-1.8.4 → agstack-1.10.0}/agstack/fastapi/sse.py +0 -0
  32. {agstack-1.8.4 → agstack-1.10.0}/agstack/infra/db/__init__.py +0 -0
  33. {agstack-1.8.4 → agstack-1.10.0}/agstack/infra/es/__init__.py +0 -0
  34. {agstack-1.8.4 → agstack-1.10.0}/agstack/infra/kg/__init__.py +0 -0
  35. {agstack-1.8.4 → agstack-1.10.0}/agstack/infra/mq/__init__.py +0 -0
  36. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/__init__.py +0 -0
  37. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/client.py +0 -0
  38. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/__init__.py +0 -0
  39. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/agent.py +0 -0
  40. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/context.py +0 -0
  41. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/event.py +0 -0
  42. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/exceptions.py +0 -0
  43. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/factory.py +0 -0
  44. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/flow.py +0 -0
  45. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/loader.py +0 -0
  46. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/agent_node.py +0 -0
  47. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/base.py +0 -0
  48. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/python_node.py +0 -0
  49. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/nodes/tool_node.py +0 -0
  50. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/records.py +0 -0
  51. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/registry.py +0 -0
  52. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/sandbox.py +0 -0
  53. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/state.py +0 -0
  54. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/flow/tool.py +0 -0
  55. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/prompts.py +0 -0
  56. {agstack-1.8.4 → agstack-1.10.0}/agstack/llm/token.py +0 -0
  57. {agstack-1.8.4 → agstack-1.10.0}/agstack/schema.py +0 -0
  58. {agstack-1.8.4 → agstack-1.10.0}/agstack/security/__init__.py +0 -0
  59. {agstack-1.8.4 → agstack-1.10.0}/agstack/security/casbin.py +0 -0
  60. {agstack-1.8.4 → agstack-1.10.0}/agstack/security/crypt.py +0 -0
  61. {agstack-1.8.4 → agstack-1.10.0}/agstack/status.py +0 -0
  62. {agstack-1.8.4 → agstack-1.10.0}/agstack.egg-info/dependency_links.txt +0 -0
  63. {agstack-1.8.4 → agstack-1.10.0}/agstack.egg-info/top_level.txt +0 -0
  64. {agstack-1.8.4 → agstack-1.10.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agstack
3
- Version: 1.8.4
3
+ Version: 1.10.0
4
4
  Summary: Production-ready toolkit for building FastAPI and LLM applications
5
5
  Author-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
6
6
  Maintainer-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
@@ -20,24 +20,23 @@ Classifier: Typing :: Typed
20
20
  Requires-Python: >=3.12
21
21
  Description-Content-Type: text/markdown
22
22
  License-File: LICENSE
23
- Requires-Dist: aio-pika>=9.6.1
23
+ Requires-Dist: aio-pika>=9.6.2
24
24
  Requires-Dist: asyncpg>=0.30.0
25
25
  Requires-Dist: elasticsearch[async]>=9.3.0
26
- Requires-Dist: fastapi>=0.133.1
27
- Requires-Dist: jwcrypto>=1.5.6
26
+ Requires-Dist: fastapi>=0.136.1
27
+ Requires-Dist: jwcrypto>=1.5.7
28
28
  Requires-Dist: loguru>=0.7.3
29
29
  Requires-Dist: nebula3-python>=3.8.3
30
- Requires-Dist: openai>=2.28.0
30
+ Requires-Dist: openai>=2.34.0
31
31
  Requires-Dist: bcrypt>=4.0.0
32
32
  Requires-Dist: pycasbin>=2.8.0
33
- Requires-Dist: pydantic>=2.12.4
34
- Requires-Dist: python-multipart>=0.0.20
33
+ Requires-Dist: pydantic>=2.13.3
34
+ Requires-Dist: python-multipart>=0.0.26
35
35
  Requires-Dist: requests>=2.32.5
36
36
  Requires-Dist: RestrictedPython>=7.0
37
- Requires-Dist: sqlalchemy[asyncio]>=2.0.48
38
- Requires-Dist: sqlobjects>=1.6.0
37
+ Requires-Dist: sqlobjects>=1.9.1
39
38
  Requires-Dist: tiktoken>=0.12.0
40
- Requires-Dist: uvicorn>=0.41.0
39
+ Requires-Dist: uvicorn>=0.46.0
41
40
  Dynamic: license-file
42
41
 
43
42
  # AgStack
@@ -9,6 +9,8 @@ from .llm_chat_node import LLMChatNodeHandler
9
9
  from .llm_embed_node import LLMEmbedNodeHandler
10
10
  from .llm_rerank_node import LLMRerankNodeHandler
11
11
  from .python_node import PythonNodeHandler
12
+ from .subflow_node import SubflowNodeHandler
13
+ from .switch_node import SwitchNodeHandler
12
14
  from .tool_node import ToolNodeHandler
13
15
 
14
16
 
@@ -21,6 +23,8 @@ builtin_handlers: list[NodeHandler] = [
21
23
  LLMEmbedNodeHandler(),
22
24
  LLMRerankNodeHandler(),
23
25
  DetectNodeHandler(),
26
+ SwitchNodeHandler(),
27
+ SubflowNodeHandler(),
24
28
  ]
25
29
 
26
30
  __all__ = [
@@ -47,10 +47,12 @@ class DetectNodeHandler(NodeHandler):
47
47
  resolved_inputs = self.resolve_inputs(config, context)
48
48
 
49
49
  query = resolved_inputs.get("query", "")
50
- instruction = config.get("instruction", "Classify the input")
51
- options = config.get("options", [])
52
- model = config.get("model", "gpt-4o-mini")
53
- temperature = config.get("temperature", 0.0)
50
+ instruction = resolved_inputs.get("instruction") or config.get("instruction", "Classify the input")
51
+ _options = resolved_inputs.get("options")
52
+ options = _options if _options is not None else config.get("options", [])
53
+ model = resolved_inputs.get("model") or config.get("model", "gpt-4o-mini")
54
+ _raw_temp = resolved_inputs.get("temperature")
55
+ temperature: float = float(_raw_temp) if _raw_temp is not None else float(config.get("temperature", 0.0))
54
56
 
55
57
  messages = self._build_classification_prompt(instruction, options, query)
56
58
 
@@ -47,9 +47,11 @@ class LLMChatNodeHandler(NodeHandler):
47
47
  resolved_inputs = self.resolve_inputs(config, context)
48
48
  prompt_text = self._build_prompt(config.get("prompt", ""), resolved_inputs)
49
49
 
50
- model = config.get("model", "gpt-4o")
51
- temperature = config.get("temperature", 0.7)
52
- max_tokens = config.get("max_tokens")
50
+ model = resolved_inputs.get("model") or config.get("model", "gpt-4o")
51
+ _temp = resolved_inputs.get("temperature")
52
+ temperature: float = float(_temp) if _temp is not None else float(config.get("temperature", 0.7))
53
+ _max = resolved_inputs.get("max_tokens")
54
+ max_tokens = _max if _max is not None else config.get("max_tokens")
53
55
 
54
56
  client = get_llm_client()
55
57
  messages: list[ChatCompletionMessageParam] = [{"role": "user", "content": prompt_text}]
@@ -103,9 +105,11 @@ class LLMChatNodeHandler(NodeHandler):
103
105
  resolved_inputs = self.resolve_inputs(config, context)
104
106
  prompt_text = self._build_prompt(config.get("prompt", ""), resolved_inputs)
105
107
 
106
- model = config.get("model", "gpt-4o")
107
- temperature = config.get("temperature", 0.7)
108
- max_tokens = config.get("max_tokens")
108
+ model = resolved_inputs.get("model") or config.get("model", "gpt-4o")
109
+ _temp = resolved_inputs.get("temperature")
110
+ temperature: float = float(_temp) if _temp is not None else float(config.get("temperature", 0.7))
111
+ _max = resolved_inputs.get("max_tokens")
112
+ max_tokens = _max if _max is not None else config.get("max_tokens")
109
113
 
110
114
  client = get_llm_client()
111
115
  messages: list[ChatCompletionMessageParam] = [{"role": "user", "content": prompt_text}]
@@ -29,7 +29,7 @@ class LLMEmbedNodeHandler(NodeHandler):
29
29
  if isinstance(texts, str):
30
30
  texts = [texts]
31
31
 
32
- model = config.get("model", "bge-m3")
32
+ model = resolved_inputs.get("model") or config.get("model", "bge-m3")
33
33
 
34
34
  client = get_llm_client()
35
35
  embeddings = await client.embed(texts=texts, model=model)
@@ -30,8 +30,9 @@ class LLMRerankNodeHandler(NodeHandler):
30
30
  if isinstance(documents, str):
31
31
  documents = [documents]
32
32
 
33
- model = config.get("model", "bge-reranker-v2-m3")
34
- top_n = config.get("top_n", 10)
33
+ model = resolved_inputs.get("model") or config.get("model", "bge-reranker-v2-m3")
34
+ _top_n = resolved_inputs.get("top_n")
35
+ top_n = _top_n if _top_n is not None else config.get("top_n", 10)
35
36
 
36
37
  client = get_llm_client()
37
38
  raw_results = await client.rerank(
@@ -0,0 +1,74 @@
1
+ # Copyright (c) 2020-2026 XtraVisions, All rights reserved.
2
+
3
+ """Subflow 节点 — 引用并执行另一个 Flow 配置,实现流程复用"""
4
+
5
+ from typing import TYPE_CHECKING, Any, AsyncIterator
6
+
7
+ from ..exceptions import FlowError
8
+ from .base import NodeHandler
9
+
10
+
11
+ if TYPE_CHECKING:
12
+ from ..context import FlowContext
13
+
14
+
15
+ class SubflowNodeHandler(NodeHandler):
16
+ """子流程节点
17
+
18
+ 通过 flow_name 加载另一个 Flow 实例,在当前 FlowContext 上执行。
19
+ 子 flow 与父 flow 共享同一个 FlowContext。
20
+ """
21
+
22
+ node_type = "subflow"
23
+
24
+ def _load_subflow(self, config: dict):
25
+ """加载子 flow 实例"""
26
+ from ..loader import FlowLoader
27
+ from ..registry import registry
28
+
29
+ flow_name = config.get("flow_name", "")
30
+
31
+ # 优先从 registry 获取已注册的 flow
32
+ sub_flow = registry.create_flow(flow_name)
33
+ if sub_flow is not None:
34
+ return sub_flow
35
+
36
+ # 其次从内联配置加载
37
+ flow_config = config.get("flow_config")
38
+ if flow_config is not None:
39
+ return FlowLoader.load_from_dict(flow_config)
40
+
41
+ raise FlowError("SUBFLOW_NOT_FOUND", args={"flow_name": flow_name})
42
+
43
+ def _resolve_and_apply_inputs(self, config: dict, context: "FlowContext") -> None:
44
+ """解析 inputs 并更新 context.variables"""
45
+ inputs_spec = config.get("inputs", {})
46
+ for key, ref in inputs_spec.items():
47
+ value = context.resolve_reference(ref) if isinstance(ref, str) else ref
48
+ context.set_variable(key, value)
49
+
50
+ def _get_last_node_output(self, sub_flow, context: "FlowContext") -> Any:
51
+ """获取子 flow 最后一个节点的输出"""
52
+ if sub_flow.nodes:
53
+ last_node_id = sub_flow.nodes[-1].get("id")
54
+ if last_node_id and last_node_id in context.outputs:
55
+ return context.outputs[last_node_id]
56
+ return context.outputs
57
+
58
+ async def execute(self, node: dict, context: "FlowContext") -> Any:
59
+ config = node.get("config", {})
60
+ self._resolve_and_apply_inputs(config, context)
61
+ sub_flow = self._load_subflow(config)
62
+ await sub_flow.run(context)
63
+ return self._get_last_node_output(sub_flow, context)
64
+
65
+ async def stream(self, node: dict, context: "FlowContext", node_id: str) -> AsyncIterator[dict[str, Any]]:
66
+ config = node.get("config", {})
67
+ self._resolve_and_apply_inputs(config, context)
68
+ sub_flow = self._load_subflow(config)
69
+
70
+ async for evt in sub_flow.stream(context):
71
+ yield evt
72
+
73
+ result = self._get_last_node_output(sub_flow, context)
74
+ context.set_output(node_id, result)
@@ -0,0 +1,41 @@
1
+ # Copyright (c) 2020-2026 XtraVisions, All rights reserved.
2
+
3
+ """Switch 节点 — 纯变量匹配路由,零 LLM 调用开销"""
4
+
5
+ from typing import TYPE_CHECKING, Any
6
+
7
+ from .base import NodeHandler
8
+
9
+
10
+ if TYPE_CHECKING:
11
+ from ..context import FlowContext
12
+
13
+
14
+ class SwitchNodeHandler(NodeHandler):
15
+ """条件路由节点
16
+
17
+ 读取 flow 变量值,与 cases 映射表匹配,返回路由键。
18
+ 用于根据系统配置在运行时选择不同执行路径。
19
+
20
+ 输出:{"choice": "<matched_case>"}
21
+ """
22
+
23
+ node_type = "switch"
24
+
25
+ async def execute(self, node: dict, context: "FlowContext") -> Any:
26
+ config = node.get("config", {})
27
+ variable_ref = config.get("variable", "")
28
+ cases: dict[str, str] = config.get("cases", {})
29
+ default = config.get("default")
30
+
31
+ value = context.resolve_reference(variable_ref)
32
+ value_str = str(value) if value is not None else ""
33
+
34
+ if value_str in cases:
35
+ choice = value_str
36
+ elif default is not None:
37
+ choice = default
38
+ else:
39
+ choice = value_str
40
+
41
+ return {"choice": choice}
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agstack
3
- Version: 1.8.4
3
+ Version: 1.10.0
4
4
  Summary: Production-ready toolkit for building FastAPI and LLM applications
5
5
  Author-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
6
6
  Maintainer-email: XtraVisions <gitadmin@xtravisions.com>, Chen Hao <chenhao@xtravisions.com>
@@ -20,24 +20,23 @@ Classifier: Typing :: Typed
20
20
  Requires-Python: >=3.12
21
21
  Description-Content-Type: text/markdown
22
22
  License-File: LICENSE
23
- Requires-Dist: aio-pika>=9.6.1
23
+ Requires-Dist: aio-pika>=9.6.2
24
24
  Requires-Dist: asyncpg>=0.30.0
25
25
  Requires-Dist: elasticsearch[async]>=9.3.0
26
- Requires-Dist: fastapi>=0.133.1
27
- Requires-Dist: jwcrypto>=1.5.6
26
+ Requires-Dist: fastapi>=0.136.1
27
+ Requires-Dist: jwcrypto>=1.5.7
28
28
  Requires-Dist: loguru>=0.7.3
29
29
  Requires-Dist: nebula3-python>=3.8.3
30
- Requires-Dist: openai>=2.28.0
30
+ Requires-Dist: openai>=2.34.0
31
31
  Requires-Dist: bcrypt>=4.0.0
32
32
  Requires-Dist: pycasbin>=2.8.0
33
- Requires-Dist: pydantic>=2.12.4
34
- Requires-Dist: python-multipart>=0.0.20
33
+ Requires-Dist: pydantic>=2.13.3
34
+ Requires-Dist: python-multipart>=0.0.26
35
35
  Requires-Dist: requests>=2.32.5
36
36
  Requires-Dist: RestrictedPython>=7.0
37
- Requires-Dist: sqlalchemy[asyncio]>=2.0.48
38
- Requires-Dist: sqlobjects>=1.6.0
37
+ Requires-Dist: sqlobjects>=1.9.1
39
38
  Requires-Dist: tiktoken>=0.12.0
40
- Requires-Dist: uvicorn>=0.41.0
39
+ Requires-Dist: uvicorn>=0.46.0
41
40
  Dynamic: license-file
42
41
 
43
42
  # AgStack
@@ -51,8 +51,11 @@ agstack/llm/flow/nodes/llm_chat_node.py
51
51
  agstack/llm/flow/nodes/llm_embed_node.py
52
52
  agstack/llm/flow/nodes/llm_rerank_node.py
53
53
  agstack/llm/flow/nodes/python_node.py
54
+ agstack/llm/flow/nodes/subflow_node.py
55
+ agstack/llm/flow/nodes/switch_node.py
54
56
  agstack/llm/flow/nodes/tool_node.py
55
57
  agstack/security/__init__.py
56
58
  agstack/security/casbin.py
57
59
  agstack/security/crypt.py
58
- tests/test_flow_io.py
60
+ tests/test_flow_io.py
61
+ tests/test_flow_switch_subflow.py
@@ -0,0 +1,17 @@
1
+ aio-pika>=9.6.2
2
+ asyncpg>=0.30.0
3
+ elasticsearch[async]>=9.3.0
4
+ fastapi>=0.136.1
5
+ jwcrypto>=1.5.7
6
+ loguru>=0.7.3
7
+ nebula3-python>=3.8.3
8
+ openai>=2.34.0
9
+ bcrypt>=4.0.0
10
+ pycasbin>=2.8.0
11
+ pydantic>=2.13.3
12
+ python-multipart>=0.0.26
13
+ requests>=2.32.5
14
+ RestrictedPython>=7.0
15
+ sqlobjects>=1.9.1
16
+ tiktoken>=0.12.0
17
+ uvicorn>=0.46.0
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "agstack"
3
- version = "1.8.4"
3
+ version = "1.10.0"
4
4
  description = "Production-ready toolkit for building FastAPI and LLM applications"
5
5
  readme = "README.md"
6
6
  license = "MIT"
@@ -39,31 +39,30 @@ classifiers = [
39
39
  ]
40
40
  requires-python = ">=3.12"
41
41
  dependencies = [
42
- "aio-pika>=9.6.1",
42
+ "aio-pika>=9.6.2",
43
43
  "asyncpg>=0.30.0",
44
44
  "elasticsearch[async]>=9.3.0",
45
- "fastapi>=0.133.1",
46
- "jwcrypto>=1.5.6",
45
+ "fastapi>=0.136.1",
46
+ "jwcrypto>=1.5.7",
47
47
  "loguru>=0.7.3",
48
48
  "nebula3-python>=3.8.3",
49
- "openai>=2.28.0",
49
+ "openai>=2.34.0",
50
50
  "bcrypt>=4.0.0",
51
51
  "pycasbin>=2.8.0",
52
- "pydantic>=2.12.4",
53
- "python-multipart>=0.0.20",
52
+ "pydantic>=2.13.3",
53
+ "python-multipart>=0.0.26",
54
54
  "requests>=2.32.5",
55
55
  "RestrictedPython>=7.0",
56
- "sqlalchemy[asyncio]>=2.0.48",
57
- "sqlobjects>=1.6.0",
56
+ "sqlobjects>=1.9.1",
58
57
  "tiktoken>=0.12.0",
59
- "uvicorn>=0.41.0",
58
+ "uvicorn>=0.46.0",
60
59
  ]
61
60
  [dependency-groups]
62
61
  dev = [
63
- "pre-commit>=4.4.0",
64
- "pyright>=1.1.407",
65
- "pytest>=9.0.2",
66
- "ruff>=0.15.5",
62
+ "pre-commit>=4.6.0",
63
+ "pyright>=1.1.409",
64
+ "pytest>=9.0.3",
65
+ "ruff>=0.15.12",
67
66
  "setuptools>=82.0.0",
68
67
  ]
69
68
 
@@ -257,6 +257,76 @@ class TestDetectNodeHandler:
257
257
  assert isinstance(result, dict)
258
258
  assert result == {"choice": "qa"}
259
259
 
260
+ @patch("agstack.llm.flow.nodes.detect_node.get_llm_client")
261
+ def test_dynamic_instruction_and_options(self, mock_get_client):
262
+ from agstack.llm.flow.nodes.detect_node import DetectNodeHandler
263
+
264
+ mock_response = MagicMock()
265
+ mock_choice = MagicMock()
266
+ mock_choice.message.content = '{"result": "billing"}'
267
+ mock_response.choices = [mock_choice]
268
+ mock_response.usage = MagicMock(prompt_tokens=10, completion_tokens=5, total_tokens=15)
269
+
270
+ mock_client = AsyncMock()
271
+ mock_client.chat = AsyncMock(return_value=mock_response)
272
+ mock_get_client.return_value = mock_client
273
+
274
+ handler = DetectNodeHandler()
275
+ ctx = FlowContext(
276
+ variables={
277
+ "my_instruction": "classify ticket type",
278
+ "my_options": ["billing", "technical", "general"],
279
+ }
280
+ )
281
+ node = {
282
+ "id": "detect1",
283
+ "type": "detect",
284
+ "config": {
285
+ "inputs": {
286
+ "query": "$v.user_query",
287
+ "instruction": "$v.my_instruction",
288
+ "options": "$v.my_options",
289
+ },
290
+ },
291
+ }
292
+ ctx.variables["user_query"] = "I was charged twice"
293
+ result = asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
294
+ assert result == {"choice": "billing"}
295
+
296
+ @patch("agstack.llm.flow.nodes.detect_node.get_llm_client")
297
+ def test_dynamic_model_and_temperature(self, mock_get_client):
298
+ from agstack.llm.flow.nodes.detect_node import DetectNodeHandler
299
+
300
+ mock_response = MagicMock()
301
+ mock_choice = MagicMock()
302
+ mock_choice.message.content = '{"result": "qa"}'
303
+ mock_response.choices = [mock_choice]
304
+ mock_response.usage = MagicMock(prompt_tokens=10, completion_tokens=5, total_tokens=15)
305
+
306
+ mock_client = AsyncMock()
307
+ mock_client.chat = AsyncMock(return_value=mock_response)
308
+ mock_get_client.return_value = mock_client
309
+
310
+ handler = DetectNodeHandler()
311
+ ctx = FlowContext(variables={"chosen_model": "qwen2.5-72b", "temp": 0.1})
312
+ node = {
313
+ "id": "detect2",
314
+ "type": "detect",
315
+ "config": {
316
+ "options": ["qa", "chitchat"],
317
+ "inputs": {
318
+ "query": "hello",
319
+ "model": "$v.chosen_model",
320
+ "temperature": "$v.temp",
321
+ },
322
+ },
323
+ }
324
+ result = asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
325
+ call_args = mock_client.chat.call_args
326
+ assert call_args.kwargs["model"] == "qwen2.5-72b"
327
+ assert call_args.kwargs["temperature"] == 0.1
328
+ assert result == {"choice": "qa"}
329
+
260
330
 
261
331
  # ── LLMChatNodeHandler ──
262
332
 
@@ -344,6 +414,47 @@ class TestLLMChatNodeHandler:
344
414
  assert len(system_msg) == 1
345
415
  assert system_msg[0]["content"] == "You speak Chinese"
346
416
 
417
+ @patch("agstack.llm.flow.nodes.llm_chat_node.get_llm_client")
418
+ def test_dynamic_model_temperature_max_tokens(self, mock_get_client):
419
+ from agstack.llm.flow.nodes.llm_chat_node import LLMChatNodeHandler
420
+
421
+ mock_response = MagicMock()
422
+ mock_choice = MagicMock()
423
+ mock_choice.message.content = "response"
424
+ mock_response.choices = [mock_choice]
425
+ mock_response.usage = MagicMock(prompt_tokens=5, completion_tokens=3, total_tokens=8)
426
+
427
+ mock_client = AsyncMock()
428
+ mock_client.chat = AsyncMock(return_value=mock_response)
429
+ mock_get_client.return_value = mock_client
430
+
431
+ handler = LLMChatNodeHandler()
432
+ ctx = FlowContext(
433
+ variables={
434
+ "chosen_model": "qwen2.5-72b",
435
+ "temp": 0.2,
436
+ "max_tok": 512,
437
+ }
438
+ )
439
+ node = {
440
+ "id": "chat1",
441
+ "type": "llm_chat",
442
+ "config": {
443
+ "prompt": "Hello",
444
+ "model": "gpt-4o",
445
+ "inputs": {
446
+ "model": "$v.chosen_model",
447
+ "temperature": "$v.temp",
448
+ "max_tokens": "$v.max_tok",
449
+ },
450
+ },
451
+ }
452
+ asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
453
+ call_args = mock_client.chat.call_args
454
+ assert call_args.kwargs["model"] == "qwen2.5-72b"
455
+ assert call_args.kwargs["temperature"] == 0.2
456
+ assert call_args.kwargs["max_tokens"] == 512
457
+
347
458
 
348
459
  # ── Flow routing ──
349
460
 
@@ -433,3 +544,117 @@ class TestDataFlowIntegration:
433
544
  }
434
545
  result = asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
435
546
  assert result == {"result": 30}
547
+
548
+
549
+ class TestLLMRerankNodeHandler:
550
+ """LLM Rerank 节点动态参数测试"""
551
+
552
+ @patch("agstack.llm.flow.nodes.llm_rerank_node.get_llm_client")
553
+ def test_dynamic_model_and_top_n(self, mock_get_client):
554
+ from agstack.llm.flow.nodes.llm_rerank_node import LLMRerankNodeHandler
555
+
556
+ mock_client = AsyncMock()
557
+ mock_client.rerank = AsyncMock(return_value=[(0, 0.95, "doc A"), (1, 0.80, "doc B")])
558
+ mock_get_client.return_value = mock_client
559
+
560
+ handler = LLMRerankNodeHandler()
561
+ ctx = FlowContext(variables={"rerank_model": "bge-reranker-large", "topk": 2})
562
+ node = {
563
+ "id": "rerank1",
564
+ "type": "llm_rerank",
565
+ "config": {
566
+ "model": "bge-reranker-v2-m3",
567
+ "inputs": {
568
+ "query": "best python book",
569
+ "documents": ["doc A", "doc B", "doc C"],
570
+ "model": "$v.rerank_model",
571
+ "top_n": "$v.topk",
572
+ },
573
+ },
574
+ }
575
+ result = asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
576
+ call_args = mock_client.rerank.call_args
577
+ assert call_args.kwargs["model"] == "bge-reranker-large"
578
+ assert call_args.kwargs["top_n"] == 2
579
+ assert result == {
580
+ "results": [{"index": 0, "score": 0.95, "text": "doc A"}, {"index": 1, "score": 0.80, "text": "doc B"}]
581
+ }
582
+
583
+ @patch("agstack.llm.flow.nodes.llm_rerank_node.get_llm_client")
584
+ def test_static_fallback_still_works(self, mock_get_client):
585
+ from agstack.llm.flow.nodes.llm_rerank_node import LLMRerankNodeHandler
586
+
587
+ mock_client = AsyncMock()
588
+ mock_client.rerank = AsyncMock(return_value=[(0, 0.9, "doc A")])
589
+ mock_get_client.return_value = mock_client
590
+
591
+ handler = LLMRerankNodeHandler()
592
+ ctx = FlowContext()
593
+ node = {
594
+ "id": "rerank2",
595
+ "type": "llm_rerank",
596
+ "config": {
597
+ "model": "bge-reranker-v2-m3",
598
+ "top_n": 5,
599
+ "inputs": {
600
+ "query": "test",
601
+ "documents": ["doc A"],
602
+ },
603
+ },
604
+ }
605
+ asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
606
+ call_args = mock_client.rerank.call_args
607
+ assert call_args.kwargs["model"] == "bge-reranker-v2-m3"
608
+ assert call_args.kwargs["top_n"] == 5
609
+
610
+
611
+ class TestLLMEmbedNodeHandler:
612
+ """LLM Embed 节点动态参数测试"""
613
+
614
+ @patch("agstack.llm.flow.nodes.llm_embed_node.get_llm_client")
615
+ def test_dynamic_model(self, mock_get_client):
616
+ from agstack.llm.flow.nodes.llm_embed_node import LLMEmbedNodeHandler
617
+
618
+ mock_client = AsyncMock()
619
+ mock_client.embed = AsyncMock(return_value=[[0.1, 0.2, 0.3]])
620
+ mock_get_client.return_value = mock_client
621
+
622
+ handler = LLMEmbedNodeHandler()
623
+ ctx = FlowContext(variables={"embed_model": "text-embedding-3-large"})
624
+ node = {
625
+ "id": "embed1",
626
+ "type": "llm_embed",
627
+ "config": {
628
+ "model": "bge-m3",
629
+ "inputs": {
630
+ "texts": ["hello world"],
631
+ "model": "$v.embed_model",
632
+ },
633
+ },
634
+ }
635
+ result = asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
636
+ call_args = mock_client.embed.call_args
637
+ assert call_args.kwargs["model"] == "text-embedding-3-large"
638
+ assert result == {"embeddings": [[0.1, 0.2, 0.3]]}
639
+
640
+ @patch("agstack.llm.flow.nodes.llm_embed_node.get_llm_client")
641
+ def test_static_model_fallback(self, mock_get_client):
642
+ from agstack.llm.flow.nodes.llm_embed_node import LLMEmbedNodeHandler
643
+
644
+ mock_client = AsyncMock()
645
+ mock_client.embed = AsyncMock(return_value=[[0.1, 0.2]])
646
+ mock_get_client.return_value = mock_client
647
+
648
+ handler = LLMEmbedNodeHandler()
649
+ ctx = FlowContext()
650
+ node = {
651
+ "id": "embed2",
652
+ "type": "llm_embed",
653
+ "config": {
654
+ "model": "bge-m3",
655
+ "inputs": {"texts": ["hello"]},
656
+ },
657
+ }
658
+ asyncio.get_event_loop().run_until_complete(handler.execute(node, ctx))
659
+ call_args = mock_client.embed.call_args
660
+ assert call_args.kwargs["model"] == "bge-m3"
@@ -0,0 +1,340 @@
1
+ # Copyright (c) 2020-2026 XtraVisions, All rights reserved.
2
+
3
+ """Switch 和 Subflow 节点测试"""
4
+
5
+ import asyncio
6
+
7
+ import pytest
8
+
9
+ from agstack.llm.flow.context import FlowContext
10
+ from agstack.llm.flow.flow import Flow
11
+ from agstack.llm.flow.nodes.subflow_node import SubflowNodeHandler
12
+ from agstack.llm.flow.nodes.switch_node import SwitchNodeHandler
13
+
14
+
15
+ def run(coro):
16
+ return asyncio.get_event_loop().run_until_complete(coro)
17
+
18
+
19
+ # ── Switch 节点 ──
20
+
21
+
22
+ class TestSwitchNode:
23
+ """switch 节点单元测试"""
24
+
25
+ def setup_method(self):
26
+ self.handler = SwitchNodeHandler()
27
+
28
+ def test_match_case(self):
29
+ """正常匹配 cases 中的值"""
30
+ ctx = FlowContext(variables={"model_tier": "strong"})
31
+ node = {
32
+ "id": "sw1",
33
+ "type": "switch",
34
+ "config": {
35
+ "variable": "$v.model_tier",
36
+ "cases": {"strong": "research_agent", "basic": "research_pipeline"},
37
+ },
38
+ }
39
+ result = run(self.handler.execute(node, ctx))
40
+ assert result == {"choice": "strong"}
41
+
42
+ def test_no_match_with_default(self):
43
+ """未匹配时使用 default"""
44
+ ctx = FlowContext(variables={"model_tier": "unknown"})
45
+ node = {
46
+ "id": "sw1",
47
+ "type": "switch",
48
+ "config": {
49
+ "variable": "$v.model_tier",
50
+ "cases": {"strong": "research_agent", "basic": "research_pipeline"},
51
+ "default": "basic",
52
+ },
53
+ }
54
+ result = run(self.handler.execute(node, ctx))
55
+ assert result == {"choice": "basic"}
56
+
57
+ def test_no_match_no_default(self):
58
+ """未匹配且无 default 时使用变量原始值"""
59
+ ctx = FlowContext(variables={"model_tier": "medium"})
60
+ node = {
61
+ "id": "sw1",
62
+ "type": "switch",
63
+ "config": {
64
+ "variable": "$v.model_tier",
65
+ "cases": {"strong": "research_agent", "basic": "research_pipeline"},
66
+ },
67
+ }
68
+ result = run(self.handler.execute(node, ctx))
69
+ assert result == {"choice": "medium"}
70
+
71
+ def test_none_variable(self):
72
+ """变量值为 None 时转为空字符串"""
73
+ ctx = FlowContext()
74
+ node = {
75
+ "id": "sw1",
76
+ "type": "switch",
77
+ "config": {
78
+ "variable": "$v.missing",
79
+ "cases": {"strong": "a"},
80
+ "default": "fallback",
81
+ },
82
+ }
83
+ result = run(self.handler.execute(node, ctx))
84
+ assert result == {"choice": "fallback"}
85
+
86
+ def test_integer_variable(self):
87
+ """整数变量转为字符串后匹配"""
88
+ ctx = FlowContext(variables={"level": 2})
89
+ node = {
90
+ "id": "sw1",
91
+ "type": "switch",
92
+ "config": {
93
+ "variable": "$v.level",
94
+ "cases": {"1": "low", "2": "mid", "3": "high"},
95
+ },
96
+ }
97
+ result = run(self.handler.execute(node, ctx))
98
+ assert result == {"choice": "2"}
99
+
100
+ def test_node_type(self):
101
+ assert self.handler.node_type == "switch"
102
+
103
+
104
+ # ── Subflow 节点 ──
105
+
106
+
107
+ class TestSubflowNode:
108
+ """subflow 节点单元测试"""
109
+
110
+ def setup_method(self):
111
+ self.handler = SubflowNodeHandler()
112
+
113
+ def test_subflow_executes_child_flow(self):
114
+ """子 flow 正常执行并返回结果"""
115
+ child_flow = Flow(
116
+ flow_id="child_1",
117
+ name="simple_flow",
118
+ nodes=[
119
+ {
120
+ "id": "py1",
121
+ "type": "python",
122
+ "config": {"code": "def main(**kwargs):\n return {'answer': 42}"},
123
+ },
124
+ ],
125
+ edges=[],
126
+ )
127
+
128
+ from agstack.llm.flow.registry import registry
129
+
130
+ registry._flows["simple_flow"] = lambda **kwargs: child_flow
131
+
132
+ try:
133
+ ctx = FlowContext()
134
+ node = {
135
+ "id": "sub1",
136
+ "type": "subflow",
137
+ "config": {"flow_name": "simple_flow"},
138
+ }
139
+ result = run(self.handler.execute(node, ctx))
140
+ assert result == {"answer": 42}
141
+ finally:
142
+ registry._flows.pop("simple_flow", None)
143
+
144
+ def test_subflow_with_inputs(self):
145
+ """子 flow 接收 inputs 参数"""
146
+ child_flow = Flow(
147
+ flow_id="child_2",
148
+ name="echo_flow",
149
+ nodes=[
150
+ {
151
+ "id": "py1",
152
+ "type": "python",
153
+ "config": {
154
+ "code": "def main(**kwargs):\n return {'echo': kwargs.get('query', '')}",
155
+ "inputs": {"query": "$v.query"},
156
+ },
157
+ },
158
+ ],
159
+ edges=[],
160
+ )
161
+
162
+ from agstack.llm.flow.registry import registry
163
+
164
+ registry._flows["echo_flow"] = lambda **kwargs: child_flow
165
+
166
+ try:
167
+ ctx = FlowContext(variables={"user_query": "hello world"})
168
+ node = {
169
+ "id": "sub1",
170
+ "type": "subflow",
171
+ "config": {
172
+ "flow_name": "echo_flow",
173
+ "inputs": {"query": "$v.user_query"},
174
+ },
175
+ }
176
+ result = run(self.handler.execute(node, ctx))
177
+ assert result == {"echo": "hello world"}
178
+ assert ctx.variables["query"] == "hello world"
179
+ finally:
180
+ registry._flows.pop("echo_flow", None)
181
+
182
+ def test_subflow_not_found(self):
183
+ """找不到子 flow 时抛出 FlowError"""
184
+ from agstack.llm.flow.exceptions import FlowError
185
+
186
+ ctx = FlowContext()
187
+ node = {
188
+ "id": "sub1",
189
+ "type": "subflow",
190
+ "config": {"flow_name": "nonexistent_flow"},
191
+ }
192
+ with pytest.raises(FlowError):
193
+ run(self.handler.execute(node, ctx))
194
+
195
+ def test_subflow_inline_config(self):
196
+ """通过内联 flow_config 加载子 flow"""
197
+ ctx = FlowContext()
198
+ node = {
199
+ "id": "sub1",
200
+ "type": "subflow",
201
+ "config": {
202
+ "flow_name": "not_registered",
203
+ "flow_config": {
204
+ "flow_id": "inline_1",
205
+ "name": "inline_flow",
206
+ "nodes": [
207
+ {
208
+ "id": "py1",
209
+ "type": "python",
210
+ "config": {"code": "def main(**kwargs):\n return {'inline': True}"},
211
+ },
212
+ ],
213
+ "edges": [],
214
+ },
215
+ },
216
+ }
217
+ result = run(self.handler.execute(node, ctx))
218
+ assert result == {"inline": True}
219
+
220
+ def test_subflow_stream(self):
221
+ """流式执行子 flow 并透传事件"""
222
+ child_flow = Flow(
223
+ flow_id="child_s",
224
+ name="stream_flow",
225
+ nodes=[
226
+ {
227
+ "id": "py1",
228
+ "type": "python",
229
+ "config": {"code": "def main(**kwargs):\n return {'streamed': True}"},
230
+ },
231
+ ],
232
+ edges=[{"source": "py1", "target": None}],
233
+ )
234
+
235
+ from agstack.llm.flow.registry import registry
236
+
237
+ registry._flows["stream_flow"] = lambda **kwargs: child_flow
238
+
239
+ try:
240
+ ctx = FlowContext()
241
+ node = {
242
+ "id": "sub1",
243
+ "type": "subflow",
244
+ "config": {"flow_name": "stream_flow"},
245
+ }
246
+
247
+ async def collect():
248
+ events = []
249
+ async for evt in self.handler.stream(node, ctx, "sub1"):
250
+ events.append(evt)
251
+ return events
252
+
253
+ events = run(collect())
254
+ assert len(events) > 0
255
+ assert ctx.outputs["sub1"] == {"streamed": True}
256
+ finally:
257
+ registry._flows.pop("stream_flow", None)
258
+
259
+ def test_node_type(self):
260
+ assert self.handler.node_type == "subflow"
261
+
262
+
263
+ # ── 集成测试:switch + edge 路由 ──
264
+
265
+
266
+ class TestSwitchEdgeRouting:
267
+ """switch 节点与 edge 路由集成测试"""
268
+
269
+ def test_switch_routes_to_correct_branch(self):
270
+ """switch 节点路由到正确的下游节点"""
271
+ flow = Flow(
272
+ flow_id="test_flow",
273
+ name="switch_routing",
274
+ nodes=[
275
+ {
276
+ "id": "model_switch",
277
+ "type": "switch",
278
+ "config": {
279
+ "variable": "$v.model_tier",
280
+ "cases": {"strong": "agent_a", "basic": "pipeline_b"},
281
+ "default": "pipeline_b",
282
+ },
283
+ },
284
+ {
285
+ "id": "agent_a",
286
+ "type": "python",
287
+ "config": {"code": "def main(**kwargs):\n return {'path': 'strong_path'}"},
288
+ },
289
+ {
290
+ "id": "pipeline_b",
291
+ "type": "python",
292
+ "config": {"code": "def main(**kwargs):\n return {'path': 'basic_path'}"},
293
+ },
294
+ ],
295
+ edges=[
296
+ {"source": "model_switch", "target": "agent_a", "condition": "strong"},
297
+ {"source": "model_switch", "target": "pipeline_b", "condition": "basic"},
298
+ ],
299
+ )
300
+
301
+ ctx = FlowContext(variables={"model_tier": "strong"})
302
+ run(flow.run(ctx))
303
+ assert ctx.outputs["agent_a"] == {"path": "strong_path"}
304
+ assert "pipeline_b" not in ctx.outputs
305
+
306
+ def test_switch_routes_default(self):
307
+ """switch 节点 default 分支路由"""
308
+ flow = Flow(
309
+ flow_id="test_flow",
310
+ name="switch_default",
311
+ nodes=[
312
+ {
313
+ "id": "sw",
314
+ "type": "switch",
315
+ "config": {
316
+ "variable": "$v.tier",
317
+ "cases": {"a": "node_a", "b": "node_b"},
318
+ "default": "b",
319
+ },
320
+ },
321
+ {
322
+ "id": "node_a",
323
+ "type": "python",
324
+ "config": {"code": "def main(**kwargs):\n return {'path': 'a'}"},
325
+ },
326
+ {
327
+ "id": "node_b",
328
+ "type": "python",
329
+ "config": {"code": "def main(**kwargs):\n return {'path': 'b'}"},
330
+ },
331
+ ],
332
+ edges=[
333
+ {"source": "sw", "target": "node_a", "condition": "a"},
334
+ {"source": "sw", "target": "node_b", "condition": "b"},
335
+ ],
336
+ )
337
+
338
+ ctx = FlowContext(variables={"tier": "unknown"})
339
+ run(flow.run(ctx))
340
+ assert ctx.outputs["node_b"] == {"path": "b"}
@@ -1,18 +0,0 @@
1
- aio-pika>=9.6.1
2
- asyncpg>=0.30.0
3
- elasticsearch[async]>=9.3.0
4
- fastapi>=0.133.1
5
- jwcrypto>=1.5.6
6
- loguru>=0.7.3
7
- nebula3-python>=3.8.3
8
- openai>=2.28.0
9
- bcrypt>=4.0.0
10
- pycasbin>=2.8.0
11
- pydantic>=2.12.4
12
- python-multipart>=0.0.20
13
- requests>=2.32.5
14
- RestrictedPython>=7.0
15
- sqlalchemy[asyncio]>=2.0.48
16
- sqlobjects>=1.6.0
17
- tiktoken>=0.12.0
18
- uvicorn>=0.41.0
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes