alayaflow 0.1.2__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/alayamem_chat/1.0.0/metadata.json +9 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/alayamem_chat/1.0.0/schemas.py +61 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/alayamem_chat/1.0.0/workflow.py +246 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/autotable/1.0.0/metadata.json +9 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/autotable/1.0.0/metadata.py +17 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/autotable/1.0.0/requirements.txt +14 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/autotable/1.0.0/schemas.py +44 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/autotable/1.0.0/utils.py +35 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/autotable/1.0.0/workflow.py +267 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/metadata.py +16 -0
- alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/workflow.py +49 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/PKG-INFO +1 -1
- alayaflow-0.1.4/examples/alayamem_demo.py +142 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/examples/autotable_demo.py +26 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/examples/chat_demo.py +8 -1
- {alayaflow-0.1.2 → alayaflow-0.1.4}/pyproject.toml +1 -1
- alayaflow-0.1.4/src/alayaflow/__init__.py +7 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/api/api_singleton.py +27 -2
- alayaflow-0.1.4/src/alayaflow/clients/alayamem/base_client.py +31 -0
- alayaflow-0.1.4/src/alayaflow/clients/alayamem/http_client.py +87 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/common/config.py +10 -8
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/llm_node.py +4 -5
- alayaflow-0.1.4/src/alayaflow/component/memory.py +52 -0
- alayaflow-0.1.4/src/alayaflow/component/retrieve_node.py +77 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/executor_manager.py +19 -1
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/executors/base_executor.py +5 -1
- alayaflow-0.1.4/src/alayaflow/execution/executors/naive_executor.py +81 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/executors/uv_executor.py +10 -1
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/executors/worker_executor.py +3 -1
- alayaflow-0.1.4/src/alayaflow/utils/coroutine.py +20 -0
- alayaflow-0.1.4/src/alayaflow/utils/logger.py +79 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/uv.lock +8 -19
- alayaflow-0.1.2/.alaya.ai/alayaflow/workflows/autotable/1.0.0/metadata.json +0 -9
- alayaflow-0.1.2/.alaya.ai/alayaflow/workflows/autotable/1.0.0/workflow.py +0 -400
- alayaflow-0.1.2/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/workflow.py +0 -94
- alayaflow-0.1.2/src/alayaflow/__init__.py +0 -5
- alayaflow-0.1.2/src/alayaflow/clients/alayamem/base_client.py +0 -19
- alayaflow-0.1.2/src/alayaflow/clients/alayamem/http_client.py +0 -64
- alayaflow-0.1.2/src/alayaflow/component/memory.py +0 -50
- alayaflow-0.1.2/src/alayaflow/component/retrieve_node.py +0 -11
- alayaflow-0.1.2/src/alayaflow/execution/executors/naive_executor.py +0 -119
- alayaflow-0.1.2/tests/clients/__init__.py +0 -1
- alayaflow-0.1.2/tests/clients/conftest.py +0 -9
- alayaflow-0.1.2/tests/clients/test_alayamem.py +0 -57
- {alayaflow-0.1.2/.alaya.ai/alayaflow/workflows/simple_chat → alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/alayamem_chat}/1.0.0/metadata.py +0 -0
- {alayaflow-0.1.2/.alaya.ai/alayaflow/workflows/autotable → alayaflow-0.1.4/.alaya.ai/alayaflow/workflows/alayamem_chat}/1.0.0/requirements.txt +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/metadata.json +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/requirements.txt +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/schemas.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/.github/workflows/pr-test.yml +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/.gitignore +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/LICENSE +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/README.md +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/pyproject.origin.toml +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/api/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/chat_model.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/intent_classifier.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/langflow/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/langflow/intent_classifier.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/model/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/model/model_manager.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/model/schemas.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/search_node.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/component/web_search.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/env_manager.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/executors/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/langfuse_tracing.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/execution/workflow_runner.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/utils/singleton.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/workflow/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/workflow/runnable/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/workflow/runnable/base_runnable_workflow.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/workflow/runnable/state_graph_runnable_workflow.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/workflow/workflow_info.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/workflow/workflow_loader.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/src/alayaflow/workflow/workflow_manager.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/tests/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/tests/component/test_intent_classifier.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/tests/component/test_llm_node.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/tests/execution/test_env_reuse.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/tests/workflow/__init__.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/tests/workflow/conftest.py +0 -0
- {alayaflow-0.1.2 → alayaflow-0.1.4}/tests/workflow/test_workflow_loader.py +0 -0
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
from typing import TypedDict, List, Optional
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel, Field
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import BaseMessage, AIMessageChunk
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class WorkflowInitArgs(BaseModel):
    """Static arguments supplied once, when the workflow graph is built."""

    # Base URL of the AlayaMem memory service; threaded into every node factory.
    alayamem_url: str = Field(..., description="AlayaMem URL")
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Input(BaseModel):
    """Per-invocation input: the conversation messages to process."""

    messages: List[BaseMessage] = Field(..., description="List of input messages")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class WorkflowContext(BaseModel):
    """Runtime context injected per invocation (identity + prompt templates).

    The template fields below are ``str.format`` templates consumed by the
    chat node when assembling the model input.  Their default values and
    descriptions are user-facing runtime strings and are kept verbatim.
    """

    # Caller identity and the chat model to use.
    user_id: str = Field(..., description="User ID")
    session_id: str = Field(..., description="Session ID")
    chat_model_id: str = Field(..., description="Chat Model ID")

    # Base system prompt prepended to every conversation.
    system_prompt: str = Field(
        default="你是一个有帮助的AI助手。",
        description="系统基础提示词"
    )
    # User-profile section template; placeholder: {user_profile}.
    user_profile_prompt: str = Field(
        default="## 用户画像\n{user_profile}",
        description="用户画像提示词模板,使用 {user_profile} 作为占位符"
    )
    # Session-summary section template; placeholder: {session_summary}.
    session_summary_prompt: str = Field(
        default="## 会话摘要\n{session_summary}",
        description="会话摘要提示词模板,使用 {session_summary} 作为占位符"
    )
    # Retrieved-documents section template; placeholder: {retrieved_docs}.
    retrieved_docs_prompt: str = Field(
        default="## 相关参考资料\n{retrieved_docs}",
        description="检索文档提示词模板,使用 {retrieved_docs} 作为占位符"
    )
    # Conversation-history section template; placeholder: {history_turns}.
    history_turns_prompt: str = Field(
        default="## 历史对话\n{history_turns}",
        description="历史对话提示词模板,使用 {history_turns} 作为占位符"
    )
    # Wrapper combining all gathered sections; placeholder: {context_parts}.
    context_wrapper_prompt: str = Field(
        default="# 上下文信息\n\n{context_parts}\n\n请基于以上信息来回答用户的问题。",
        description="上下文包装提示词模板,使用 {context_parts} 作为占位符"
    )
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class Output(BaseModel):
    """Workflow output: the final chat response payload."""

    chat_response: dict = Field(..., description="Chat response")
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class WorkflowState(TypedDict):
    """Mutable graph state passed between nodes."""

    # Conversation messages supplied by the caller.
    messages: List[BaseMessage]
    # Presumably set once the memory backend is ready — not written by the
    # node functions visible in this file; TODO confirm against the runner.
    memory_initialized: bool
    # Documents produced by the retrieval node.
    retrieved_docs: Optional[List[str]]
    # Accumulated streaming chunks from the chat model.
    stream_chunks: List[AIMessageChunk]
    # Final chat model response.  NOTE(review): the chat node stores the
    # model's message object here, not a plain dict — confirm consumers.
    chat_response: Optional[dict]
    # Assembled context string, if built.
    context: Optional[str]
    # Newly added fields:
    # Prior turns fetched from AlayaMem.
    history_turns: Optional[List[dict]]
    # Rolling session summary fetched from AlayaMem.
    session_summary: Optional[str]
    # User profile fetched from AlayaMem.
    user_profile: Optional[dict]
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
from langgraph.graph import StateGraph, START, END
|
|
2
|
+
from langgraph.runtime import Runtime
|
|
3
|
+
|
|
4
|
+
from alayaflow.component.memory import query_file, commit_turn, turns, summary, profile
|
|
5
|
+
from alayaflow.component.model import ModelManager
|
|
6
|
+
|
|
7
|
+
from .schemas import WorkflowInitArgs, WorkflowState, WorkflowContext, Input, Output
|
|
8
|
+
|
|
9
|
+
def mk_turns_node(alayamem_url: str):
    """Build a graph node that loads recent conversation turns from AlayaMem."""

    def turns_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
        ctx = runtime.context
        raw = turns(alayamem_url, ctx.session_id, ctx.user_id)

        # Debug dump of the raw service response.
        print(f"\n[1. turns 接口返回]")
        print(f" 返回类型: {type(raw)}")
        print(f" 返回值: {raw}")

        new_state = state.copy()
        # The turns endpoint returns a plain list; anything else counts as empty.
        new_state["history_turns"] = raw if isinstance(raw, list) else []
        return new_state

    return turns_node
|
|
25
|
+
|
|
26
|
+
def mk_query_file_node(alayamem_url: str):
    """Build a graph node that retrieves documents related to the latest message."""

    def query_file_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
        msgs = state.get("messages", [])
        latest = msgs[-1] if msgs else None
        raw = query_file(alayamem_url, runtime.context.user_id, latest)

        # Debug dump of the raw service response.
        print(f"\n[2. query_file 接口返回]")
        print(f" 返回类型: {type(raw)}")
        if isinstance(raw, dict):
            print(f" 字段列表: {list(raw.keys())}")
        print(f" 返回值: {raw}")

        new_state = state.copy()
        # Expected payload shape: {"results": {"documents": [...]}}; anything
        # else degrades to an empty document list.
        docs = []
        if isinstance(raw, dict):
            inner = raw.get("results", {})
            if isinstance(inner, dict):
                docs = inner.get("documents", [])
        new_state["retrieved_docs"] = docs
        return new_state

    return query_file_node
|
|
49
|
+
|
|
50
|
+
def mk_summary_node(alayamem_url: str):
    """Build a graph node that fetches the rolling session summary from AlayaMem."""

    def summary_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
        ctx = runtime.context
        raw = summary(alayamem_url, ctx.session_id, ctx.user_id)

        # Debug dump of the raw service response.
        print(f"\n[2. summary 接口返回]")
        print(f" 返回类型: {type(raw)}")
        if isinstance(raw, dict):
            print(f" 字段列表: {list(raw.keys())}")
        print(f" 返回值: {raw}")

        new_state = state.copy()
        # The summary value may be absent or explicitly None; normalize to "".
        value = raw.get("summary", "") if isinstance(raw, dict) else ""
        new_state["session_summary"] = "" if value is None else value
        return new_state

    return summary_node
|
|
72
|
+
|
|
73
|
+
def mk_profile_node(alayamem_url: str):
    """Build a graph node that fetches the user's profile from AlayaMem."""

    def profile_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
        raw = profile(alayamem_url, runtime.context.user_id)

        # Debug dump of the raw service response.
        print(f"\n[3. profile 接口返回]")
        print(f" 返回类型: {type(raw)}")
        if isinstance(raw, dict):
            print(f" 字段列表: {list(raw.keys())}")
        print(f" 返回值: {raw}")

        new_state = state.copy()
        # The profile value may be absent or explicitly None; normalize to {}.
        value = raw.get("profile", {}) if isinstance(raw, dict) else {}
        new_state["user_profile"] = {} if value is None else value
        return new_state

    return profile_node
|
|
94
|
+
|
|
95
|
+
def mk_commit_turn_node(alayamem_url: str, window_size: int = 3):
    """Build a graph node that persists the latest user/assistant exchange.

    Args:
        alayamem_url: Base URL of the AlayaMem service.
        window_size: Rolling-window size forwarded to ``commit_turn``.
            Previously hard-coded to 3 inside the node; now a parameter with
            the same default, so existing callers are unaffected.
    """

    def commit_turn_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
        session_id = runtime.context.session_id
        user_id = runtime.context.user_id
        messages = state.get("messages", [])
        chat_response = state.get("chat_response")

        def _text_of(msg) -> str:
            # Messages may be LangChain message objects (``.content``) or
            # plain dicts; anything else yields an empty string.
            if hasattr(msg, 'content'):
                return msg.content
            if isinstance(msg, dict):
                return msg.get('content', '')
            return ""

        # Extract the latest user message and the assistant reply.
        user_text = _text_of(messages[-1]) if messages else ""
        assistant_text = _text_of(chat_response) if chat_response else ""

        # Persist the turn.
        original_result = commit_turn(alayamem_url, session_id, user_text,
                                      assistant_text, user_id, window_size=window_size)

        # Debug dump of the raw service response.
        print(f"\n[4. commit_turn 接口返回]")
        print(f" 返回类型: {type(original_result)}")
        if isinstance(original_result, dict):
            print(f" 字段列表: {list(original_result.keys())}")
        print(f" 返回值: {original_result}")

        # Committing does not alter the state; return an unchanged copy.
        return state.copy()

    return commit_turn_node
|
|
131
|
+
|
|
132
|
+
def mk_chat_node():
    """Build the chat node: assembles context sections into system messages
    and invokes the chat model selected by ``context.chat_model_id``.

    The closure captures a single ``ModelManager`` shared across invocations.
    """
    model_manager = ModelManager()

    def chat_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
        # Resolve the chat model; fail loudly on an unknown model id.
        model_id = runtime.context.chat_model_id
        chat_model = model_manager.get_model(model_id)
        if not chat_model:
            raise ValueError(f"无法找到模型ID为 '{model_id}' 的模型配置")

        # Local import keeps langchain optional until the node actually runs.
        # NOTE(review): HumanMessage/AIMessage are imported but unused here.
        from langchain_core.messages import SystemMessage, HumanMessage, AIMessage

        # Fetch the prompt template configuration from the runtime context.
        ctx = runtime.context
        system_prompt = ctx.system_prompt
        user_profile_prompt = ctx.user_profile_prompt
        session_summary_prompt = ctx.session_summary_prompt
        retrieved_docs_prompt = ctx.retrieved_docs_prompt
        history_turns_prompt = ctx.history_turns_prompt
        context_wrapper_prompt = ctx.context_wrapper_prompt

        # Build the full message list sent to the model.
        full_messages = []

        # Base system prompt goes first.
        if system_prompt:
            full_messages.append(SystemMessage(content=system_prompt))

        # Collect optional context sections in a fixed order.
        context_parts = []

        # User-profile section — deliberately disabled (commented out).
        # user_profile = state.get("user_profile", {})
        # if user_profile:
        #     profile_text = user_profile_prompt.format(user_profile=str(user_profile))
        #     context_parts.append(profile_text)

        # Session-summary section.
        session_summary = state.get("session_summary", "")
        if session_summary:
            summary_text = session_summary_prompt.format(session_summary=session_summary)
            context_parts.append(summary_text)

        # Retrieved-documents section.
        retrieved_docs = state.get("retrieved_docs", [])
        if retrieved_docs:
            docs_content = "\n\n".join([str(doc) for doc in retrieved_docs])
            docs_text = retrieved_docs_prompt.format(retrieved_docs=docs_content)
            context_parts.append(docs_text)

        # Conversation-history section.
        history_turns = state.get("history_turns", [])
        if history_turns:
            history_content = "\n".join([
                f"用户: {turn.get('user_text', '')}\n助手: {turn.get('assistant_text', '')}"
                for turn in history_turns
            ])
            history_text = history_turns_prompt.format(history_turns=history_content)
            context_parts.append(history_text)

        # If any context was gathered, wrap it into a second system message.
        if context_parts:
            context_content = context_wrapper_prompt.format(
                context_parts="\n\n".join(context_parts)
            )
            full_messages.append(SystemMessage(content=context_content))

        # Caller-supplied messages come last.
        current_messages = state["messages"]
        full_messages.extend(current_messages)

        # Debug: print the final message list fed to the model.
        print(f"\n[5. 模型输入消息]")
        print(f" 消息数量: {len(full_messages)}")
        print("-" * 50)
        full_content = ""
        for msg in full_messages:
            msg_type = type(msg).__name__
            content = msg.content if hasattr(msg, 'content') else str(msg)
            full_content += f"[{msg_type}]\n{content}\n\n"
        print(full_content)
        print("-" * 50)

        # Invoke the model synchronously.
        response = chat_model.invoke(full_messages)

        updated_state = state.copy()
        # NOTE(review): state typing declares chat_response as Optional[dict],
        # but this stores the model's message object — confirm consumers.
        updated_state['chat_response'] = response
        return updated_state
    return chat_node
|
|
221
|
+
|
|
222
|
+
def create_graph(init_args: WorkflowInitArgs | dict):
    """Compile the alayamem_chat workflow graph.

    Pipeline: fetch history turns -> retrieve related documents -> fetch
    session summary -> fetch user profile -> chat completion -> commit turn.

    Fix: ``query_file_node`` was registered via ``add_node`` but never wired
    with any edge, leaving it unreachable (langgraph's compile-time
    validation rejects unreachable nodes) and leaving ``retrieved_docs`` —
    which the chat node consumes — always unset.  It is now inserted between
    ``turns_node`` and ``summary_node``.

    Args:
        init_args: ``WorkflowInitArgs`` instance or an equivalent dict.

    Returns:
        The compiled langgraph graph.
    """
    if isinstance(init_args, dict):
        init_args = WorkflowInitArgs(**init_args)
    alayamem_url = init_args.alayamem_url

    graph = StateGraph(WorkflowState, WorkflowContext, input_type=Input, output_type=Output)

    # Register nodes.
    graph.add_node("turns_node", mk_turns_node(alayamem_url))
    graph.add_node("query_file_node", mk_query_file_node(alayamem_url))
    graph.add_node("summary_node", mk_summary_node(alayamem_url))
    graph.add_node("profile_node", mk_profile_node(alayamem_url))
    graph.add_node("chat_node", mk_chat_node())
    graph.add_node("commit_turn_node", mk_commit_turn_node(alayamem_url))

    # Linear pipeline: history -> retrieval -> summary -> profile -> chat -> commit.
    graph.add_edge(START, "turns_node")
    graph.add_edge("turns_node", "query_file_node")
    graph.add_edge("query_file_node", "summary_node")
    graph.add_edge("summary_node", "profile_node")
    graph.add_edge("profile_node", "chat_node")
    graph.add_edge("chat_node", "commit_turn_node")
    graph.add_edge("commit_turn_node", END)

    return graph.compile()
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
|
|
3
|
+
from alayaflow.workflow import WorkflowInfo
|
|
4
|
+
|
|
5
|
+
def get_metadata():
    """Return the WorkflowInfo descriptor for the autotable workflow."""
    return WorkflowInfo(
        id="autotable",
        name="Auto Table Filler",
        description="基于知识库的智能表格自动填写工作流",
        version="1.0.0",
        tags=["extraction", "table", "rag"],
        entry_file="workflow.py",
        entry_point="create_graph",
        # The workflow directory is wherever this metadata module lives.
        wf_dir=Path(__file__).parent,
    )
|
|
17
|
+
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# LangGraph 核心依赖
|
|
2
|
+
langgraph>=0.2.0
|
|
3
|
+
|
|
4
|
+
# LangChain Core
|
|
5
|
+
langchain-core>=0.3.0
|
|
6
|
+
|
|
7
|
+
# LangChain Community (用于 ChatOpenAI)
|
|
8
|
+
langchain-community>=0.3.0
|
|
9
|
+
|
|
10
|
+
# OpenAI SDK (DeepSeek API 兼容 OpenAI 格式)
|
|
11
|
+
openai>=1.0.0
|
|
12
|
+
|
|
13
|
+
# Langfuse (可选,用于追踪)
|
|
14
|
+
langfuse>=3.0.0,<4.0.0
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict, List, Any, Union, TypeAlias, TypedDict, Annotated
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from pydantic import BaseModel, Field
|
|
6
|
+
from .utils import merge_dicts, deep_merge
|
|
7
|
+
|
|
8
|
+
FieldSpec: TypeAlias = Union[str, Dict[str, List[Any]]]
|
|
9
|
+
|
|
10
|
+
@dataclass(frozen=True)
class GroupTask:
    """One planned extraction task: a group of leaf keys under a nested path.

    Frozen (hashable/immutable) so tasks can be deduplicated and stored in
    graph state safely.
    """

    # Nested group names leading to this group, outermost first.
    path: tuple[str, ...]
    # Leaf field names to fill within that group.
    keys: tuple[str, ...]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class WorkflowInitArgs(BaseModel):
    """Static arguments supplied once, when the autotable graph is built."""

    # Base URL of the AlayaMem retrieval service.
    alayamem_url: str = Field(..., description="AlayaMem 服务地址")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class Input(BaseModel):
    """Per-invocation input: the table-field template (possibly nested)."""

    # Each entry is either a plain field name or a {group: [sub-fields]} dict.
    fields: List[FieldSpec] = Field(..., description="要填写的表格字段模板(支持嵌套结构)")
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class WorkflowContext(BaseModel):
    """Runtime context injected per invocation."""

    # Name of the vector collection to retrieve from.
    collection_name: str = Field(default="file_watcher_collection", description="检索的集合名称")
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class Output(BaseModel):
    """Workflow output: the filled table plus debugging artifacts."""

    # Completed table values, nested to mirror the input template.
    final_result: Dict[str, Any] = Field(..., description="填写完成的表格数据(嵌套结构)")
    # Retrieved document snippets per task (debugging aid).
    context_by_task: Dict[str, List[str]] = Field(default_factory=dict, description="每个任务检索到的文档片段(调试用)")
    # Per-task error messages, if any.
    errors: Dict[str, str] = Field(default_factory=dict, description="错误信息")
    # The tasks that were planned for this run.
    tasks: List[GroupTask] = Field(default_factory=list, description="规划的任务列表")
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class OverallState(TypedDict):
    """Top-level graph state; ``Annotated`` reducers merge fan-in updates."""

    # Input field template.
    fields: List[FieldSpec]
    # Filled values; concurrent branch patches are combined with deep_merge.
    final_result: Annotated[Dict[str, Any], deep_merge]
    # Per-task retrieved snippets; shallow-merged across branches.
    context_by_task: Annotated[Dict[str, List[str]], merge_dicts]
    # Per-task errors; shallow-merged across branches.
    errors: Annotated[Dict[str, str], merge_dicts]
    # Planned extraction tasks.
    tasks: List[GroupTask]
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class TaskState(TypedDict):
    """Per-branch state handed to a single extraction task."""

    # The one task this branch is responsible for.
    task: GroupTask
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
from typing import Any, Dict, List, Tuple
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def make_patch(path: Tuple[str, ...], kv: Dict[str, str]) -> Dict[str, Any]:
    """Wrap *kv* in nested single-key dicts along *path* (outermost first).

    Example::

        make_patch(("个人信息","联系方式"), {"电话":"..","邮箱":".."})
        => {"个人信息":{"联系方式":{"电话":"..","邮箱":".."}}}

    The input mapping is copied, never mutated.
    """
    wrapped: Dict[str, Any] = dict(kv)
    for segment in path[::-1]:
        wrapped = {segment: wrapped}
    return wrapped
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def slim_docs(docs: List[str], max_doc_chars: int) -> List[str]:
    """Stringify each doc, truncating to *max_doc_chars* with a trailing '…'.

    A falsy *docs* (None, empty list) yields an empty list.
    """
    trimmed = []
    for doc in docs or []:
        text = str(doc)
        if len(text) > max_doc_chars:
            text = text[:max_doc_chars] + "…"
        trimmed.append(text)
    return trimmed
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def merge_dicts(a: Dict, b: Dict) -> Dict:
    """Shallow merge: keys from *b* override keys from *a*; inputs untouched."""
    merged = dict(a)
    merged.update(b)
    return merged
|
|
27
|
+
|
|
28
|
+
def deep_merge(a: Dict[str, Any], b: Dict[str, Any]) -> Dict[str, Any]:
    """Recursively merge *b* into a copy of *a*.

    When both sides hold a dict under the same key, merge them recursively;
    otherwise the value from *b* wins.  Either argument may be None.
    """
    merged = dict(a or {})
    for key, value in (b or {}).items():
        if key in merged and isinstance(merged[key], dict) and isinstance(value, dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged
|