alayaflow 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92) hide show
  1. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/autotable/1.0.0/metadata.json +9 -0
  2. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/autotable/1.0.0/requirements.txt +11 -0
  3. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/autotable/1.0.0/workflow.py +402 -0
  4. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/metadata.json +9 -0
  5. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/__init__.py +11 -0
  6. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/chat_nodes.py +22 -0
  7. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/memory_nodes.py +30 -0
  8. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/retrieval_nodes.py +11 -0
  9. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/requirements.txt +11 -0
  10. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/state.py +18 -0
  11. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/workflow.py +27 -0
  12. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/test_chat/1.0.0/metadata.json +9 -0
  13. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/test_chat/1.0.0/requirements.txt +11 -0
  14. alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/test_chat/1.0.0/workflow.py +101 -0
  15. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/.gitignore +1 -0
  16. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/.lock +0 -0
  17. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/CACHEDIR.TAG +1 -0
  18. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/alayaflow_installed.marker +1 -0
  19. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/deps_installed.marker +1 -0
  20. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-code.1 +82 -0
  21. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-detect.1 +97 -0
  22. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-explore.1 +88 -0
  23. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-help.1 +50 -0
  24. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-standardize.1 +96 -0
  25. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-view.1 +76 -0
  26. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv.1 +109 -0
  27. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/share/man/man1/ipython.1 +60 -0
  28. alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/share/man/man1/isympy.1 +188 -0
  29. alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/.gitignore +1 -0
  30. alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/.lock +0 -0
  31. alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/CACHEDIR.TAG +1 -0
  32. alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/alayaflow_installed.marker +1 -0
  33. alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/deps_installed.marker +1 -0
  34. alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/.gitignore +1 -0
  35. alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/.lock +0 -0
  36. alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/CACHEDIR.TAG +1 -0
  37. alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/alayaflow_installed.marker +1 -0
  38. alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/deps_installed.marker +1 -0
  39. alayaflow-0.1.0/.github/workflows/pr-test.yml +41 -0
  40. alayaflow-0.1.0/.gitignore +240 -0
  41. alayaflow-0.1.0/LICENSE +661 -0
  42. alayaflow-0.1.0/PKG-INFO +99 -0
  43. alayaflow-0.1.0/PYPI_UPLOAD_GUIDE.md +301 -0
  44. alayaflow-0.1.0/README.md +79 -0
  45. alayaflow-0.1.0/examples/autotable_demo.py +60 -0
  46. alayaflow-0.1.0/examples/chat_demo.py +47 -0
  47. alayaflow-0.1.0/pyproject.origin.toml +47 -0
  48. alayaflow-0.1.0/pyproject.toml +47 -0
  49. alayaflow-0.1.0/src/alayaflow/__init__.py +5 -0
  50. alayaflow-0.1.0/src/alayaflow/api/__init__.py +5 -0
  51. alayaflow-0.1.0/src/alayaflow/api/api_singleton.py +81 -0
  52. alayaflow-0.1.0/src/alayaflow/clients/alayamem/base_client.py +19 -0
  53. alayaflow-0.1.0/src/alayaflow/clients/alayamem/http_client.py +64 -0
  54. alayaflow-0.1.0/src/alayaflow/common/config.py +106 -0
  55. alayaflow-0.1.0/src/alayaflow/component/__init__.py +0 -0
  56. alayaflow-0.1.0/src/alayaflow/component/chat_model.py +20 -0
  57. alayaflow-0.1.0/src/alayaflow/component/intent_classifier.py +94 -0
  58. alayaflow-0.1.0/src/alayaflow/component/langflow/__init__.py +0 -0
  59. alayaflow-0.1.0/src/alayaflow/component/langflow/intent_classifier.py +83 -0
  60. alayaflow-0.1.0/src/alayaflow/component/llm_node.py +123 -0
  61. alayaflow-0.1.0/src/alayaflow/component/memory.py +50 -0
  62. alayaflow-0.1.0/src/alayaflow/component/retrieve_node.py +17 -0
  63. alayaflow-0.1.0/src/alayaflow/component/web_search.py +126 -0
  64. alayaflow-0.1.0/src/alayaflow/execution/__init__.py +6 -0
  65. alayaflow-0.1.0/src/alayaflow/execution/env_manager.py +424 -0
  66. alayaflow-0.1.0/src/alayaflow/execution/executor_manager.py +59 -0
  67. alayaflow-0.1.0/src/alayaflow/execution/executors/__init__.py +9 -0
  68. alayaflow-0.1.0/src/alayaflow/execution/executors/base_executor.py +9 -0
  69. alayaflow-0.1.0/src/alayaflow/execution/executors/naive_executor.py +121 -0
  70. alayaflow-0.1.0/src/alayaflow/execution/executors/uv_executor.py +125 -0
  71. alayaflow-0.1.0/src/alayaflow/execution/executors/worker_executor.py +12 -0
  72. alayaflow-0.1.0/src/alayaflow/execution/langfuse_tracing.py +104 -0
  73. alayaflow-0.1.0/src/alayaflow/execution/workflow_runner.py +98 -0
  74. alayaflow-0.1.0/src/alayaflow/utils/singleton.py +14 -0
  75. alayaflow-0.1.0/src/alayaflow/workflow/__init__.py +6 -0
  76. alayaflow-0.1.0/src/alayaflow/workflow/runnable/__init__.py +7 -0
  77. alayaflow-0.1.0/src/alayaflow/workflow/runnable/base_runnable_workflow.py +19 -0
  78. alayaflow-0.1.0/src/alayaflow/workflow/runnable/state_graph_runnable_workflow.py +23 -0
  79. alayaflow-0.1.0/src/alayaflow/workflow/workflow_info.py +50 -0
  80. alayaflow-0.1.0/src/alayaflow/workflow/workflow_loader.py +168 -0
  81. alayaflow-0.1.0/src/alayaflow/workflow/workflow_manager.py +257 -0
  82. alayaflow-0.1.0/tests/__init__.py +1 -0
  83. alayaflow-0.1.0/tests/clients/__init__.py +1 -0
  84. alayaflow-0.1.0/tests/clients/conftest.py +9 -0
  85. alayaflow-0.1.0/tests/clients/test_alayamem.py +57 -0
  86. alayaflow-0.1.0/tests/component/test_intent_classifier.py +236 -0
  87. alayaflow-0.1.0/tests/component/test_llm_node.py +313 -0
  88. alayaflow-0.1.0/tests/execution/test_env_reuse.py +243 -0
  89. alayaflow-0.1.0/tests/workflow/__init__.py +0 -0
  90. alayaflow-0.1.0/tests/workflow/conftest.py +18 -0
  91. alayaflow-0.1.0/tests/workflow/test_workflow_loader.py +36 -0
  92. alayaflow-0.1.0/uv.lock +2728 -0
@@ -0,0 +1,9 @@
1
+ {
2
+ "id": "autotable",
3
+ "name": "RAG 并发信息抽取工作流",
4
+ "description": "基于 LangGraph Map-Reduce 架构的高性能抽取流程。集成信号量限流(Semaphore)、JSON 结构化校验、文档截断及错误兜底机制。",
5
+ "version": "1.0.0",
6
+ "tags": ["rag", "extraction", "langgraph", "json-mode"],
7
+ "entry_file": "workflow.py",
8
+ "entry_point": "create_graph"
9
+ }
@@ -0,0 +1,11 @@
1
+ # LangGraph 核心依赖
2
+ langgraph>=0.2.0
3
+
4
+ # LangChain Community (用于 ChatOpenAI)
5
+ langchain-community>=0.3.0
6
+
7
+ # OpenAI SDK (DeepSeek API 兼容 OpenAI 格式)
8
+ openai>=1.0.0
9
+
10
+ # Langfuse
11
+ langfuse>=3.0.0,<4.0.0
@@ -0,0 +1,402 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from dataclasses import dataclass
5
+ from typing import Any, Dict, List, Optional, TypedDict, Annotated, Union, TypeAlias, Tuple
6
+ from collections import defaultdict
7
+ from threading import Semaphore
8
+
9
+ from langgraph.graph import StateGraph, START, END
10
+ from langgraph.types import Send
11
+ from langchain_core.runnables import RunnableConfig
12
+
13
+ from alayaflow.component.llm_node import LLMComponent, ResponseFormat
14
+ from alayaflow.clients.alayamem.http_client import HttpAlayaMemClient
15
+ from alayaflow.component.retrieve_node import RetrieveComponent
16
+
17
+
18
+ FieldSpec: TypeAlias = Union[str, Dict[str, List["FieldSpec"]]] # 递归:dict -> list[FieldSpec]
19
+
20
def merge_dicts(a: Dict, b: Dict) -> Dict:
    """Shallow-merge two dicts into a new dict; values from *b* win on collisions."""
    merged = dict(a)
    merged.update(b)
    return merged
22
+
23
def deep_merge(a: Dict[str, Any], b: Dict[str, Any]) -> Dict[str, Any]:
    """Recursively merge *b* into a copy of *a* and return the result.

    When both sides hold a dict under the same key, the two are merged
    key-by-key; any other value from *b* simply replaces the one in *a*.
    Either argument may be None and is treated as an empty mapping.
    """
    result: Dict[str, Any] = dict(a or {})
    for key, value in (b or {}).items():
        existing = result.get(key)
        if isinstance(existing, dict) and isinstance(value, dict):
            result[key] = deep_merge(existing, value)
        else:
            result[key] = value
    return result
31
+
32
+
33
@dataclass(frozen=True)
class GroupTask:
    """One extraction unit: the leaf fields sharing a single parent path."""

    # Parent path, e.g. ("personal info", "contact"); the root is the empty tuple.
    path: Tuple[str, ...]
    # Leaf field names to extract under this path.
    keys: Tuple[str, ...]
37
+
38
+
39
class OverallState(TypedDict):
    """Shared graph state for the map-reduce extraction workflow."""

    fields: List[FieldSpec]  # input template (recursive spec)
    tasks: List[GroupTask]   # tasks produced by the planning node

    # Debug info: retrieved snippets per task; concurrent branches are
    # combined with the merge_dicts reducer.
    context_by_task: Annotated[Dict[str, List[str]], merge_dicts]

    # Final value tree: per-task patches are merged concurrently via the
    # deep_merge reducer.
    final_result: Annotated[Dict[str, Any], deep_merge]

    # Per-task error messages, merged across branches.
    errors: Annotated[Dict[str, str], merge_dicts]
50
+
51
+
52
class TaskState(TypedDict):
    """Input state for a single fanned-out extraction branch."""

    task: GroupTask  # the field group this branch is responsible for
54
+
55
+
56
+
57
+ def _as_list(x: Any) -> List[Any]:
58
+ if x is None:
59
+ return []
60
+ if isinstance(x, list):
61
+ return x
62
+ return [x]
63
+
64
def flatten_leaf_tasks(specs: List[FieldSpec], base_path: Optional[List[str]] = None) -> List[Tuple[Tuple[str, ...], str]]:
    """Flatten a recursive field spec into ``[(path_tuple, leaf_key), ...]``.

    A string item is a leaf at the current path. A dict item maps a parent
    name to its children, where children may be a list, a single string, a
    single dict, or None. Items of any other type are silently skipped,
    matching the original behavior.
    """
    prefix = list(base_path) if base_path else []
    pairs: List[Tuple[Tuple[str, ...], str]] = []

    for entry in specs or []:
        if isinstance(entry, str):
            pairs.append((tuple(prefix), entry))
        elif isinstance(entry, dict):
            for parent, children in entry.items():
                child_path = prefix + [parent]
                # Normalize children to a list (None -> [], scalar -> [scalar]).
                if children is None:
                    child_list = []
                elif isinstance(children, list):
                    child_list = children
                else:
                    child_list = [children]
                for child in child_list:
                    if isinstance(child, str):
                        pairs.append((tuple(child_path), child))
                    elif isinstance(child, dict):
                        pairs.extend(flatten_leaf_tasks([child], child_path))
        # anything else (numbers, tuples, ...) is ignored

    return pairs
88
+
89
def plan_node(state: OverallState, config: RunnableConfig):
    """Planning node: group leaf fields by parent path into GroupTask items.

    Duplicate keys within one path are dropped (first occurrence wins) and
    the task list is sorted by path depth then path. Ordering only affects
    log readability, not the concurrent merge result.
    """
    grouped: Dict[Tuple[str, ...], List[str]] = defaultdict(list)
    for path, key in flatten_leaf_tasks(state["fields"]):
        grouped[path].append(key)

    # dict.fromkeys preserves insertion order while de-duplicating.
    tasks = [
        GroupTask(path=path, keys=tuple(dict.fromkeys(keys)))
        for path, keys in grouped.items()
    ]
    tasks.sort(key=lambda t: (len(t.path), t.path))
    return {"tasks": tasks}
109
+
110
+
111
def map_tasks(state: OverallState):
    """Fan-out edge: emit one Send to ``extract_task`` per planned task."""
    sends = []
    for task in state["tasks"]:
        sends.append(Send("extract_task", {"task": task}))
    return sends
113
+
114
+
115
+
116
def make_patch(path: Tuple[str, ...], kv: Dict[str, str]) -> Dict[str, Any]:
    """Wrap *kv* in nested single-key dicts following *path*.

    Example: ``make_patch(("a", "b"), {"x": "1"})`` returns
    ``{"a": {"b": {"x": "1"}}}``; an empty path returns a copy of *kv*.
    """
    patch: Dict[str, Any] = dict(kv)
    for segment in path[::-1]:
        patch = {segment: patch}
    return patch
125
+
126
+
127
+
128
def build_system_prompt(keys: list[str]) -> str:
    """Build the system prompt instructing the LLM to act as a strict
    JSON "table patch extractor" limited to exactly *keys*.

    The prompt text itself is runtime data sent to the model and is kept
    verbatim (Chinese); it enforces strict-JSON output, string-only values,
    empty-string fallbacks, newline-separated multi-entry fields, table-cell
    interpretation rules, and placeholder rejection.
    """
    keys_str = ", ".join(keys)

    return f"""
你是一个严谨的“局部字段抽取器”(table patch extractor)。

你的任务是:**只为指定字段抽取值**,严格依据提供的知识片段,不得猜测或编造。

通用规则:
1. 输出必须是严格合法 JSON,不允许包含解释、Markdown、代码块或多余文本。
2. **只允许输出以下字段(不多不少)**:{keys_str}
3. 所有字段值必须是字符串。
4. 找不到 / 不确定 / 空值 / 占位符 → 必须输出空字符串 ""。
5. 字段名可能存在空格或轻微变体(如“姓 名”≈“姓名”),允许智能匹配,但不得扩展到未指定字段。

长文本字段格式规则(必须遵守):
- 当字段内容包含**多个条目、多个时间段或多段经历**时:
- 必须使用序号列表格式。
- **每个条目占一行,条目之间必须使用 "\n" 换行符分隔。**
- 不允许使用分号、顿号、逗号等方式合并多个条目到同一行。
- 示例正确格式:
"1.第一条内容\n2.第二条内容\n3.第三条内容"

表格单元格理解规则(重要):
- 知识片段可能来自表格,每行使用 " | " 分隔单元格。
- "<空>" 表示空单元格,对应值为 ""。
- 字段名后不一定是值:
- 若字段名后是 "<空>" → 值为 ""。
- 若字段名后是另一个字段名 → 继续向后寻找第一个“非字段名 / 非占位符”的单元格作为值。
- 示例:"字段A | 字段B | 值" → 字段A="", 字段B="值"。

占位符识别:
- 若候选值是模板占位符或签字日期类文本
(如“签字: 年 月 日”“学院盖章: 年 月 日”等),必须返回 ""。
""".strip()
163
+
164
+
165
def build_user_prompt(
    content_text: str,
    path: list[str],
    keys: list[str],
) -> str:
    """Build the user prompt carrying the retrieved context and a JSON
    skeleton template for exactly *keys*.

    Args:
        content_text: concatenated knowledge-base snippets.
        path: field path used only for semantic localization; an empty
            path is rendered as ``<root>``.
        keys: the field names the model must output (no more, no less).

    The prompt body is runtime data sent to the model and is kept verbatim.
    """
    path_str = " / ".join(path) if path else "<root>"
    keys_str = ", ".join(keys)

    # Pre-render an all-empty JSON object so the model has an exact shape to fill.
    json_skeleton = "{\n" + ",\n".join([f' "{k}": ""' for k in keys]) + "\n}"

    return f"""
【本次任务定位】
字段路径(仅用于语义定位,不要输出):{path_str}
需要抽取的字段:{keys_str}

【知识库片段】
{content_text}

【输出要求】
- 只输出一个 JSON 对象
- key 必须严格为:{keys_str}
- 无法确定 / 空值 / 占位符 → 输出 ""

【JSON 输出模板】
{json_skeleton}
""".strip()
191
+
192
+
193
def create_extract_task_node(
    client: HttpAlayaMemClient,
    *,
    max_concurrency: int = 10,
    top_k: int = 5,
    max_doc_chars: int = 400,
):
    """Build the fan-out extraction node (retrieve -> LLM -> patch).

    Args:
        client: AlayaMem HTTP client used for retrieval.
        max_concurrency: cap on simultaneous branches (Semaphore-backed).
        top_k: number of snippets to retrieve per task.
        max_doc_chars: per-snippet truncation length before prompting.

    Returns:
        A node callable ``(TaskState, RunnableConfig) -> state patch`` that
        always produces a structurally complete ``final_result`` patch
        (missing fields filled with "") and records failures in ``errors``
        instead of raising.
    """
    # Shared limiter: throttles concurrent retrieval/LLM work across branches.
    limiter = Semaphore(max_concurrency)

    def slim_docs(docs: List[str]) -> List[str]:
        """Stringify and truncate each snippet to max_doc_chars (with ellipsis)."""
        out = []
        for d in docs or []:
            s = str(d)
            if len(s) > max_doc_chars:
                s = s[:max_doc_chars] + "…"
            out.append(s)
        return out

    def node(state: TaskState, config: RunnableConfig):
        task = state["task"]
        path = task.path
        keys = list(task.keys)

        # Stable identifier for logging/merging, e.g. "a/b:x,y" or "<root>:x".
        task_id = f"{'/'.join(path) or '<root>'}:{','.join(keys)}"

        # Default patch: keeps the result tree structurally stable even on
        # failure or empty retrieval (every key present, value "").
        default_kv = {k: "" for k in keys}
        default_patch = make_patch(path, default_kv)

        try:
            with limiter:
                # collection_name is a runtime parameter passed via config.
                config_dict = config.get("configurable", {}) if isinstance(config, dict) else {}
                collection_name = config_dict.get("collection_name", "file_watcher_collection")

                # 1) Retrieval query: path segments + keys. Deeper paths carry
                # their parent headings to improve hit rate.
                query_parts = list(path) + keys
                query = ";".join([p for p in query_parts if p])

                retrieve_component = RetrieveComponent(client=client)
                docs = retrieve_component(query=query, collection_name=collection_name, limit=top_k)
                docs = slim_docs(docs)

                # No snippets: return the all-empty default patch directly.
                if not docs:
                    return {
                        "context_by_task": {task_id: []},
                        "final_result": default_patch,
                    }

                formatted_context = "\n\n".join(
                    [f"片段 {i+1}: {doc}" for i, doc in enumerate(docs)]
                )

                # 2) Extract all keys in one strict-JSON LLM call.
                # (A previously computed local json_skeleton was unused here;
                # build_user_prompt renders its own skeleton.)
                system_prompt = build_system_prompt(keys)
                user_prompt = build_user_prompt(formatted_context, path, keys)

                llm = LLMComponent(
                    model_name="deepseek-chat",
                    system_prompt=system_prompt,
                    prompt=user_prompt,
                    response_format=ResponseFormat.JSON,
                    temperature=0.0,
                )

                msg = llm()
                obj = json.loads(msg.content)

                # Coerce every expected key to a stripped string; missing or
                # null values become "".
                extracted = {}
                for k in keys:
                    v = obj.get(k, "")
                    extracted[k] = (str(v).strip() if v is not None else "")

                patch = make_patch(path, extracted)

                return {
                    "context_by_task": {task_id: docs},
                    "final_result": patch,
                }

        except Exception as e:
            # Deliberate best-effort fallback: keep the structure intact and
            # surface the failure via the errors channel instead of raising.
            return {
                "context_by_task": {task_id: []},
                "final_result": default_patch,
                "errors": {task_id: f"{type(e).__name__}: {e}"},
            }

    return node
286
+
287
+
288
+
289
def validate_node(state: OverallState, config: RunnableConfig):
    """Check the merged result for blank/missing expected fields.

    Walks every planned task, resolves its scope inside ``final_result``
    and collects the dotted names of fields whose value is empty after
    stripping. Returns an ``errors`` patch under ``__missing__`` when
    anything is missing, otherwise an empty update.
    """
    result_tree = state.get("final_result", {}) or {}

    def scope_at(tree: Dict[str, Any], path: Tuple[str, ...]) -> Dict[str, Any]:
        """Descend *tree* along *path*; any non-dict step yields {}."""
        node: Any = tree
        for part in path:
            if not isinstance(node, dict):
                return {}
            node = node.get(part, {})
        return node if isinstance(node, dict) else {}

    missing: List[str] = []
    for task in state["tasks"]:
        scope = scope_at(result_tree, task.path)
        missing.extend(
            ".".join(task.path + (key,)) if task.path else key
            for key in task.keys
            if not str(scope.get(key, "")).strip()
        )

    if missing:
        return {"errors": {"__missing__": ";".join(missing)}}
    return {}
311
+
312
+
313
+ # -------------------------
314
+ # Build graph
315
+ # -------------------------
316
def create_graph(init_args: Dict[str, Any]):
    """Workflow entry point: compile the plan -> fan-out extract -> validate graph.

    Args:
        init_args: must contain ``alayamem_url``, the AlayaMem service base URL.

    Returns:
        A compiled LangGraph application implementing the map-reduce
        extraction flow over OverallState.
    """
    client = HttpAlayaMemClient(init_args["alayamem_url"])
    g = StateGraph(OverallState)

    g.add_node("plan", plan_node)
    g.add_node("extract_task", create_extract_task_node(client, max_concurrency=10, top_k=3))
    g.add_node("validate", validate_node)

    g.add_edge(START, "plan")
    # The conditional edge performs the Send-based fan-out to extract_task.
    g.add_conditional_edges("plan", map_tasks, ["extract_task"])
    g.add_edge("extract_task", "validate")
    g.add_edge("validate", END)

    return g.compile()
330
+
331
+
332
if __name__ == "__main__":
    # Demo entry point: build the graph against a local AlayaMem instance and
    # run a full extraction over a grant-application field template.
    app = create_graph({"alayamem_url": "http://10.16.70.46:5555"})

    # Recursive field template: four grouped sections plus two root-level
    # leaves ("中文摘要", "英文摘要"). Field names are runtime data and are
    # kept verbatim.
    input_data: OverallState = {
        "fields": [
            {
                "申请人信息": [
                    "姓名",
                    "性别",
                    "出生年月",
                    "民族",
                    "学位",
                    "职称",
                    "是否在站博士后",
                    "电子邮箱",
                    "办公电话",
                    "国别或地区",
                    "申请人类别",
                    "工作单位",
                    "主要研究领域"
                ]
            },
            {
                "依托单位信息": [
                    "名称",
                    "联系人",
                    "电子邮箱",
                    "电话",
                    "网站地址"
                ]
            },
            {
                "合作研究单位信息": [
                    "单位名称"
                ]
            },
            {
                "项目基本信息": [
                    "项目名称",
                    "英文名称",
                    "资助类别",
                    "亚类说明",
                    "附注说明",
                    "申请代码",
                    "研究期限",
                    "研究方向",
                    "申请资助经费",
                    "研究属性",
                    "中文关键词",
                    "英文关键词"
                ]
            },
            "中文摘要",
            "英文摘要"
        ],
        # Remaining channels start empty; reducers fill them during the run.
        "tasks": [],
        "context_by_task": {},
        "final_result": {},
        "errors": {},
    }

    # Runtime parameters read by the extract node via config["configurable"].
    config = {
        "configurable": {
            "collection_name": "file_watcher_collection",
        }
    }
    out = app.invoke(input_data, config=config)
    print("final_result:")
    print(json.dumps(out["final_result"], ensure_ascii=False, indent=2))
    print("\nerrors:")
    print(json.dumps(out["errors"], ensure_ascii=False, indent=2))
@@ -0,0 +1,9 @@
1
+ {
2
+ "id": "python_import",
3
+ "name": "DeepSeek Chatbot",
4
+ "description": "一个最简的 DeepSeek 对话工作流示例",
5
+ "version": "1.0.0",
6
+ "tags": ["chat", "deepseek", "basic"],
7
+ "entry_file": "workflow.py",
8
+ "entry_point": "create_graph"
9
+ }
@@ -0,0 +1,11 @@
1
+ from .memory_nodes import wrap_init_memory, wrap_query_message, wrap_add_message
2
+ from .retrieval_nodes import wrap_query_vdb_message
3
+ from .chat_nodes import wrap_deepseek_chat
4
+
5
+ __all__ = [
6
+ "wrap_init_memory",
7
+ "wrap_query_message",
8
+ "wrap_add_message",
9
+ "wrap_query_vdb_message",
10
+ "wrap_deepseek_chat",
11
+ ]
@@ -0,0 +1,22 @@
1
+ from langchain_core.messages import SystemMessage
2
+
3
+ from alayaflow.component.chat_model import mk_chat_model_deepseek
4
+ from ..state import WorkflowState
5
+
6
# Module-level DeepSeek chat model, created once at import time and shared
# by every call to wrap_deepseek_chat.
chat_model = mk_chat_model_deepseek()
7
+
8
def wrap_deepseek_chat(state: WorkflowState) -> WorkflowState:
    """Chat node: invoke DeepSeek, grounding on retrieved docs when present.

    If ``retrieved_docs`` is non-empty, a SystemMessage carrying the
    concatenated documents is prepended to the conversation before the
    model call. The model response is stored under ``chat_response`` in a
    shallow-copied state; the input state is not mutated.
    """
    new_state = state.copy()
    conversation = list(state["messages"])

    docs = state.get("retrieved_docs", [])
    if docs:
        reference_text = "\n\n".join(str(doc) for doc in docs)
        conversation.insert(
            0,
            SystemMessage(
                content=f"以下是相关的参考资料,请基于这些资料回答用户的问题:\n\n{reference_text}"
            ),
        )

    new_state['chat_response'] = chat_model.invoke(conversation)
    return new_state
@@ -0,0 +1,30 @@
1
+ from alayaflow.component.memory import init_memory, query_message, add_message
2
+ from ..state import WorkflowState
3
+
4
+
5
def wrap_init_memory(state: WorkflowState) -> WorkflowState:
    """Memory-init node: ensure memory exists for this (user, session).

    ``init_memory`` is called purely for its side effect; its return value
    was previously bound to an unused local (plus dead commented-out code),
    both removed here. Returns a shallow copy of the state unchanged.
    """
    init_memory(state["user_id"], state["session_id"])
    return state.copy()
15
+
16
+
17
def wrap_query_message(state: WorkflowState) -> WorkflowState:
    """Memory node: query stored messages for this user/session.

    Called for its side effect only; the query result is not written back
    into the state. Returns a shallow copy of the input state.
    """
    query_message(state["user_id"], state["session_id"], state.get("messages", []))
    return state.copy()
23
+
24
+
25
def wrap_add_message(state: WorkflowState) -> WorkflowState:
    """Memory node: persist the current messages for this user/session.

    Called for its side effect only. Returns a shallow copy of the input state.
    """
    add_message(state["user_id"], state["session_id"], state.get("messages", []))
    return state.copy()
@@ -0,0 +1,11 @@
1
+ from alayaflow.component.memory import query_vdb_message
2
+ from ..state import WorkflowState
3
+
4
+
5
def wrap_query_vdb_message(state: WorkflowState) -> WorkflowState:
    """Retrieval node: query the vector DB and stash hits in ``retrieved_docs``.

    Uses ``limit`` from the state (default 5) and stores the ``vdb_results``
    list (default []) on a shallow-copied state.
    """
    hits = query_vdb_message(state.get("messages", []), state.get("limit", 5))
    new_state = state.copy()
    new_state["retrieved_docs"] = hits.get("vdb_results", [])
    return new_state
@@ -0,0 +1,11 @@
1
+ # LangGraph 核心依赖
2
+ langgraph>=0.2.0
3
+
4
+ # LangChain Community (用于 ChatOpenAI)
5
+ langchain-community>=0.3.0
6
+
7
+ # OpenAI SDK (DeepSeek API 兼容 OpenAI 格式)
8
+ openai>=1.0.0
9
+
10
+ # Langfuse
11
+ langfuse>=3.0.0,<4.0.0
@@ -0,0 +1,18 @@
1
+ from typing import TypedDict, List, Optional, ClassVar
2
+
3
+ from langchain_core.messages import BaseMessage, AIMessageChunk
4
+
5
+
6
class WorkflowState(TypedDict):
    """Shared state flowing through the chat workflow graph."""

    user_id: str                         # owner of the conversation
    session_id: str                      # conversation/session identifier
    messages: List[BaseMessage]          # running chat history
    memory_initialized: Optional[bool]   # set by memory init (currently unused by nodes)
    stream_chunks: List[AIMessageChunk]  # streamed partial responses
    chat_response: Optional[dict]        # model response written by the chat node
    retrieved_docs: Optional[List[str]]  # vector-DB retrieval results
    context: Optional[str]               # assembled context text, if any
15
+
16
+
17
def get_state_schema() -> type[WorkflowState]:
    """Return the workflow's state schema class (the TypedDict itself).

    The previous return annotation ``ClassVar[WorkflowState]`` was invalid:
    ``ClassVar`` is only meaningful for class-body attribute annotations
    (PEP 526), not return types. The function returns the class, so the
    correct annotation is ``type[WorkflowState]``.
    """
    return WorkflowState
@@ -0,0 +1,27 @@
1
+ from langgraph.graph import StateGraph, START, END
2
+
3
+ from .state import WorkflowState, get_state_schema
4
+ from .nodes import (
5
+ wrap_init_memory,
6
+ wrap_query_vdb_message,
7
+ wrap_deepseek_chat,
8
+ wrap_add_message,
9
+ )
10
+
11
def create_graph(init_args):
    """Workflow entry point: build and compile the linear chat graph.

    Pipeline: init_memory -> vector-DB retrieval -> DeepSeek chat ->
    persist messages. ``init_args`` is accepted for interface compatibility
    with the loader but is not used by this workflow.
    """
    builder = StateGraph(WorkflowState)

    # Register nodes
    builder.add_node("init_memory", wrap_init_memory)
    builder.add_node("wrap_query_vdb_message", wrap_query_vdb_message)
    builder.add_node("chatbot", wrap_deepseek_chat)
    builder.add_node("add_message", wrap_add_message)

    # Wire the linear pipeline
    builder.add_edge(START, "init_memory")
    builder.add_edge("init_memory", "wrap_query_vdb_message")
    builder.add_edge("wrap_query_vdb_message", "chatbot")
    builder.add_edge("chatbot", "add_message")
    builder.add_edge("add_message", END)

    return builder.compile()
@@ -0,0 +1,9 @@
1
+ {
2
+ "id": "test_chat_123",
3
+ "name": "DeepSeek Chatbot",
4
+ "description": "一个最简的 DeepSeek 对话工作流示例",
5
+ "version": "1.0.0",
6
+ "tags": ["chat", "deepseek", "basic"],
7
+ "entry_file": "workflow.py",
8
+ "entry_point": "create_graph"
9
+ }
@@ -0,0 +1,11 @@
1
+ # LangGraph 核心依赖
2
+ langgraph>=0.2.0
3
+
4
+ # LangChain Community (用于 ChatOpenAI)
5
+ langchain-community>=0.3.0
6
+
7
+ # OpenAI SDK (DeepSeek API 兼容 OpenAI 格式)
8
+ openai>=1.0.0
9
+
10
+ # Langfuse
11
+ langfuse>=3.0.0,<4.0.0