alayaflow 0.1.0__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {alayaflow-0.1.0 → alayaflow-0.1.1}/.alaya.ai/alayaflow/workflows/autotable/1.0.0/workflow.py +0 -2
- alayaflow-0.1.1/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/metadata.json +9 -0
- alayaflow-0.1.1/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/schemas.py +32 -0
- {alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/test_chat → alayaflow-0.1.1/.alaya.ai/alayaflow/workflows/simple_chat}/1.0.0/workflow.py +23 -30
- {alayaflow-0.1.0 → alayaflow-0.1.1}/PKG-INFO +1 -1
- {alayaflow-0.1.0 → alayaflow-0.1.1}/examples/autotable_demo.py +1 -1
- alayaflow-0.1.1/examples/chat_demo.py +87 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/pyproject.toml +1 -1
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/__init__.py +1 -1
- alayaflow-0.1.1/src/alayaflow/api/__init__.py +8 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/api/api_singleton.py +16 -6
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/chat_model.py +2 -3
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/llm_node.py +6 -15
- alayaflow-0.1.1/src/alayaflow/component/model/__init__.py +8 -0
- alayaflow-0.1.1/src/alayaflow/component/model/model_manager.py +60 -0
- alayaflow-0.1.1/src/alayaflow/component/model/schemas.py +33 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/retrieve_node.py +1 -7
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/web_search.py +12 -12
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executor_manager.py +5 -5
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executors/base_executor.py +1 -1
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executors/naive_executor.py +10 -12
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executors/uv_executor.py +2 -2
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executors/worker_executor.py +1 -1
- alayaflow-0.1.1/src/alayaflow/workflow/runnable/base_runnable_workflow.py +21 -0
- alayaflow-0.1.1/src/alayaflow/workflow/runnable/state_graph_runnable_workflow.py +29 -0
- alayaflow-0.1.1/tests/component/test_llm_node.py +159 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/workflow/conftest.py +3 -7
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/workflow/test_workflow_loader.py +3 -1
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/metadata.json +0 -9
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/__init__.py +0 -11
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/chat_nodes.py +0 -22
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/memory_nodes.py +0 -30
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/nodes/retrieval_nodes.py +0 -11
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/state.py +0 -18
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import/1.0.0/workflow.py +0 -27
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/test_chat/1.0.0/metadata.json +0 -9
- alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/test_chat/1.0.0/requirements.txt +0 -11
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/.gitignore +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/.lock +0 -0
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/CACHEDIR.TAG +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/alayaflow_installed.marker +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/deps_installed.marker +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-code.1 +0 -82
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-detect.1 +0 -97
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-explore.1 +0 -88
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-help.1 +0 -50
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-standardize.1 +0 -96
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv-view.1 +0 -76
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/man/man1/clevercsv.1 +0 -109
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/share/man/man1/ipython.1 +0 -60
- alayaflow-0.1.0/.alayaflow_workspace/envs/10474453e4fb42279788a9b4e41c8f39/share/man/man1/isympy.1 +0 -188
- alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/.gitignore +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/.lock +0 -0
- alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/CACHEDIR.TAG +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/alayaflow_installed.marker +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/python_import_1.0.0/deps_installed.marker +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/.gitignore +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/.lock +0 -0
- alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/CACHEDIR.TAG +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/alayaflow_installed.marker +0 -1
- alayaflow-0.1.0/.alayaflow_workspace/envs/test_chat_123_1.0.0/deps_installed.marker +0 -1
- alayaflow-0.1.0/PYPI_UPLOAD_GUIDE.md +0 -301
- alayaflow-0.1.0/examples/chat_demo.py +0 -47
- alayaflow-0.1.0/src/alayaflow/api/__init__.py +0 -5
- alayaflow-0.1.0/src/alayaflow/workflow/runnable/base_runnable_workflow.py +0 -19
- alayaflow-0.1.0/src/alayaflow/workflow/runnable/state_graph_runnable_workflow.py +0 -23
- alayaflow-0.1.0/tests/component/test_llm_node.py +0 -313
- {alayaflow-0.1.0 → alayaflow-0.1.1}/.alaya.ai/alayaflow/workflows/autotable/1.0.0/metadata.json +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/.alaya.ai/alayaflow/workflows/autotable/1.0.0/requirements.txt +0 -0
- {alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/python_import → alayaflow-0.1.1/.alaya.ai/alayaflow/workflows/simple_chat}/1.0.0/requirements.txt +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/.github/workflows/pr-test.yml +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/.gitignore +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/LICENSE +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/README.md +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/pyproject.origin.toml +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/clients/alayamem/base_client.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/clients/alayamem/http_client.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/common/config.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/intent_classifier.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/langflow/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/langflow/intent_classifier.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/memory.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/env_manager.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executors/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/langfuse_tracing.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/workflow_runner.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/utils/singleton.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/workflow/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/workflow/runnable/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/workflow/workflow_info.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/workflow/workflow_loader.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/workflow/workflow_manager.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/clients/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/clients/conftest.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/clients/test_alayamem.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/component/test_intent_classifier.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/execution/test_env_reuse.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/tests/workflow/__init__.py +0 -0
- {alayaflow-0.1.0 → alayaflow-0.1.1}/uv.lock +0 -0
alayaflow-0.1.1/.alaya.ai/alayaflow/workflows/simple_chat/1.0.0/schemas.py
@@ -0,0 +1,32 @@
+from typing import TypedDict, List, Optional
+
+from pydantic import BaseModel, Field
+
+from langchain_core.messages import BaseMessage, AIMessageChunk
+
+
+class WorkflowInitArgs(BaseModel):
+    alayamem_url: str = Field(..., description="AlayaMem URL")
+
+
+class Input(BaseModel):
+    messages: List[BaseMessage] = Field(..., description="List of input messages")
+
+
+class WorkflowContext(BaseModel):
+    user_id: str = Field(..., description="User ID")
+    session_id: str = Field(..., description="Session ID")
+    chat_model_id: str = Field(..., description="Chat Model ID")
+
+
+class Output(BaseModel):
+    chat_response: dict = Field(..., description="Chat response")
+
+
+class WorkflowState(TypedDict):
+    messages: List[BaseMessage]
+    memory_initialized: bool = False
+    retrieved_docs: Optional[List[str]]
+    stream_chunks: List[AIMessageChunk] = []
+    chat_response: Optional[dict]
+    context: Optional[str]
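As a usage sketch (not part of the diff; all values are placeholders), these pydantic models validate the workflow's init args, per-run context, and input:

    from langchain_core.messages import HumanMessage

    init_args = WorkflowInitArgs(alayamem_url="http://alayamem.example:5555")
    context = WorkflowContext(user_id="u1", session_id="s1", chat_model_id="deepseek-chat")
    inputs = Input(messages=[HumanMessage(content="hello")])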
{alayaflow-0.1.0/.alaya.ai/alayaflow/workflows/test_chat → alayaflow-0.1.1/.alaya.ai/alayaflow/workflows/simple_chat}/1.0.0/workflow.py
@@ -1,25 +1,15 @@
-from typing import TypedDict, List, Optional, ClassVar
-
-from langchain_core.messages import BaseMessage, AIMessageChunk
 from langgraph.graph import StateGraph, START, END
+from langgraph.runtime import Runtime
 
 from alayaflow.component.memory import init_memory, query_message, add_message, query_vdb_message
-from alayaflow.component.
-
-
-    user_id: str
-    session_id: str
-    messages: List[BaseMessage]
-    memory_initialized: Optional[bool]
-    stream_chunks: List[AIMessageChunk]
-    chat_response: Optional[dict]
-    retrieved_docs: Optional[List[str]]
-    context: Optional[str]
+from alayaflow.component.model import ModelManager
+
+from .schemas import WorkflowInitArgs, WorkflowState, WorkflowContext, Input, Output
 
 def mk_init_memory_node(alayamem_url: str):
-    def init_memory_node(state: WorkflowState):
-        user_id = 
-        session_id = 
+    def init_memory_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
+        user_id = runtime.context.user_id
+        session_id = runtime.context.session_id
         original_result = init_memory(alayamem_url, user_id, session_id)
         updated_state = state.copy()
         updated_state["memory_initialized"] = original_result.get("status", "") == "success"
@@ -49,8 +39,14 @@ def mk_query_vdb_message_node(alayamem_url: str):
     return query_vdb_message_node
 
 def mk_chat_node():
-
-
+    model_manager = ModelManager()
+
+    def chat_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
+        model_id = runtime.context.chat_model_id
+        chat_model = model_manager.get_model(model_id)
+        if not chat_model:
+            raise ValueError(f"No model configuration found for model ID '{model_id}'")
+
         messages = state["messages"].copy()
         updated_state = state.copy()
 
@@ -69,23 +65,20 @@ def mk_chat_node():
     return chat_node
 
 def mk_add_message_node(alayamem_url: str):
-    def add_message_node(state: WorkflowState):
-        user_id = 
-        session_id = 
+    def add_message_node(state: WorkflowState, runtime: Runtime[WorkflowContext]):
+        user_id = runtime.context.user_id
+        session_id = runtime.context.session_id
         messages = state.get("messages", [])
         add_message(alayamem_url, user_id, session_id, messages)
         return state.copy()
     return add_message_node
 
-def 
-
-
-
-    alayamem_url = init_args.get("alayamem_url")
-    if not alayamem_url:
-        raise ValueError("alayamem_url is required")
+def create_graph(init_args: WorkflowInitArgs | dict):
+    if isinstance(init_args, dict):
+        init_args = WorkflowInitArgs(**init_args)
+    alayamem_url = init_args.alayamem_url
 
-    graph = StateGraph(WorkflowState)
+    graph = StateGraph(WorkflowState, WorkflowContext, input_type=Input, output_type=Output)
 
     graph.add_node("init_memory_node", mk_init_memory_node(alayamem_url))
     graph.add_node("query_vdb_message_node", mk_query_vdb_message_node(alayamem_url))
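A minimal driving sketch (not part of the diff; it assumes a langgraph version where compiled graphs accept a context argument at invocation, as the Runtime[WorkflowContext] annotations above imply, and all values are placeholders):

    graph = create_graph({"alayamem_url": "http://alayamem.example:5555"})
    app = graph.compile()
    result = app.invoke(
        {"messages": [{"role": "user", "content": "hi"}]},
        context={"user_id": "u1", "session_id": "s1", "chat_model_id": "deepseek-chat"},
    )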
alayaflow-0.1.1/examples/chat_demo.py
@@ -0,0 +1,87 @@
+import os
+import sys
+
+from alayaflow.api import Flow
+
+
+def init_flow():
+    """Initialize flow api singleton"""
+
+    # Flow() is a singleton instance
+    # So it can be "constructed" in multiple places
+    # But there is only one instance
+    flow = Flow()
+
+    flow.init({
+        "alayahub_url": "http://your-alayahub-url",
+    })
+    flow.register_models([
+        {
+            # Local used fields
+            "name": "DeepSeek Chat",
+            "model_id": "deepseek-chat",
+            "provider_name": "DeepSeek",
+            # Connection credentials
+            "model_name": "deepseek-chat",
+            "base_url": "https://api.deepseek.com/v1",
+            "api_key": os.getenv("DEEPSEEK_API_KEY"),
+        }
+    ])
+
+
+def init_workflows():
+    flow = Flow()
+
+    workflow_id = 'simple_chat'
+    workflow_version = '1.0.0'
+    init_args = {
+        "alayamem_url": "http://10.16.70.46:5555",
+    }
+    flow.load_workflow(workflow_id, workflow_version, init_args)
+
+
+def run_workflow():
+    flow = Flow()
+
+    workflow_id = 'simple_chat'
+    workflow_version = '1.0.0'
+    inputs = {
+        "messages": [{"role": "user", "content": "Who are you?"}]
+    }
+    context = {
+        "user_id": "test_user",
+        "session_id": "test_session",
+        "chat_model_id": "deepseek-chat",
+    }
+
+    def handle_chat_model_stream(event):
+        content = event["data"]["chunk"]['content']
+        if content:
+            print(content, end="", flush=True)
+
+    def handle_error(event):
+        sys.stderr.write(f"\n[Error Event Detected]\n")
+        sys.stderr.write("--- Error message ---\n")
+        sys.stderr.write(f"{event['error']}\n")
+        sys.stderr.write("\n--- Stack Trace ---\n")
+        sys.stderr.write(f"{event['traceback']}\n")
+
+    print("Start executing workflow...")
+    for event in flow.exec_workflow(workflow_id, workflow_version, inputs, context):
+        if "error" in event:
+            handle_error(event)
+            break
+        kind = event["event"]
+        if kind == "on_chat_model_stream":
+            handle_chat_model_stream(event)
+    print()
+
+
+def main():
+    init_flow()
+    init_workflows()
+    run_workflow()
+
+
+if __name__ == "__main__":
+    main()
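The demo prints tokens as they stream. A hedged variant (not part of the diff) that collects the streamed chunks into one string instead, using the same event shapes the demo handles:

    chunks = []
    for event in flow.exec_workflow(workflow_id, workflow_version, inputs, context):
        # Error events carry "error" instead of "event", so use .get() here.
        if event.get("event") == "on_chat_model_stream":
            chunks.append(event["data"]["chunk"]["content"] or "")
    answer = "".join(chunks)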
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/api/api_singleton.py
@@ -4,6 +4,7 @@ from alayaflow.utils.singleton import SingletonMeta
 from alayaflow.workflow import WorkflowManager
 from alayaflow.execution import ExecutorManager, ExecutorType
 from alayaflow.common.config import settings
+from alayaflow.component.model import ModelManager, ModelProfile
 
 
 class APISingleton(metaclass=SingletonMeta):
@@ -12,6 +13,7 @@ class APISingleton(metaclass=SingletonMeta):
         self.executor_manager = ExecutorManager(
             workflow_manager=self.workflow_manager
         )
+        self.model_manager = ModelManager()
         self._inited = False
 
     def is_inited(self) -> bool:
@@ -29,9 +31,16 @@ class APISingleton(metaclass=SingletonMeta):
         settings.langfuse_public_key = config.get("langfuse_public_key", settings.langfuse_public_key)
         settings.langfuse_secret_key = config.get("langfuse_secret_key", settings.langfuse_secret_key)
         settings.langfuse_url = config.get("langfuse_url", settings.langfuse_url)
+
         self._inited = True
         return self
 
+    def register_models(self, model_profiles: List[dict | ModelProfile]) -> None:
+        self._check_init()
+        profiles = [ModelProfile(**profile) if isinstance(profile, dict) else profile for profile in model_profiles]
+        for profile in profiles:
+            self.model_manager.register_profile(profile)
+
     def list_loaded_workflow_ids(self) -> List[str]:
         """List the loaded workflows"""
         self._check_init()
@@ -66,16 +75,17 @@ class APISingleton(metaclass=SingletonMeta):
         self,
         workflow_id: str,
         version: str,
-
-
+        inputs: dict,
+        context: dict,
         executor_type: str | ExecutorType = ExecutorType.NAIVE
     ) -> Generator[dict, None, None]:
         """Execute a workflow"""
         self._check_init()
-
+        for event in self.executor_manager.exec_workflow(
             workflow_id=workflow_id,
             version=version,
-
-
+            inputs=inputs,
+            context=context,
             executor_type=executor_type
-        )
+        ):
+            yield event
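Because register_models accepts either dicts or ModelProfile instances, the registration in chat_demo.py could equivalently use the typed form; a hedged sketch (not part of the diff; credentials are placeholders):

    from alayaflow.component.model import ModelProfile

    Flow().register_models([
        ModelProfile(
            name="DeepSeek Chat",
            model_id="deepseek-chat",
            model_name="deepseek-chat",
            base_url="https://api.deepseek.com/v1",
            api_key="sk-placeholder",  # pydantic coerces the str into a SecretStr
        )
    ])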
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/chat_model.py
@@ -1,13 +1,12 @@
-import 
+import os
 
 from langchain_openai import ChatOpenAI
 
-from alayaflow.common.config import settings
 
 def mk_chat_model_deepseek():
     return ChatOpenAI(
         model="deepseek-chat",
-        api_key="
+        api_key=os.getenv("DEEPSEEK_API_KEY"),
         base_url="https://api.deepseek.com/v1",
     )
 
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/llm_node.py
@@ -2,13 +2,13 @@ from __future__ import annotations
 
 import json
 from enum import IntEnum
-from functools import cached_property
 from typing import Any, Dict, Optional
 
-from langchain_openai import ChatOpenAI
 from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, BaseMessage
 from langchain_core.runnables import Runnable
-
+from alayaflow.component.model import ModelManager
+
 
 class ResponseFormat(IntEnum):
     TEXT = 0
@@ -19,7 +19,7 @@ class LLMComponent:
         self,
         *,
         # ===== model & prompt =====
-
+        model_id: str,
         system_prompt: str,
         prompt: str,
 
@@ -35,7 +35,7 @@ class LLMComponent:
         retry_json_once: bool = True,
     ):
         # -- configuration as member variables (= Spec) --
-        self.
+        self.model_id = model_id
         self.system_prompt = system_prompt
         self.prompt = prompt
 
@@ -47,14 +47,8 @@ class LLMComponent:
         self.json_schema = json_schema
         self.outputs = outputs or {}
         self.retry_json_once = retry_json_once
-
-
-    @cached_property
-    def llm(self) -> Runnable:
-        return ChatOpenAI(model=self.model_name, api_key="sk-4fe7cd96f5e948c79168025372e2327c", base_url="https://api.deepseek.com/v1")
 
     def _get_llm(self) -> Runnable:
-        llm = self.llm
         bind_kwargs: Dict[str, Any] = {}
 
         if self.temperature is not None:
@@ -64,11 +58,8 @@ class LLMComponent:
         if self.max_tokens is not None:
            bind_kwargs["max_tokens"] = self.max_tokens
 
-
-
-            llm = llm.bind(**bind_kwargs)
-        except Exception:
-            pass
+        model_manager = ModelManager()
+        llm = model_manager.get_model(self.model_id, runtime_config=bind_kwargs)
 
         return llm
 
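A hedged construction sketch for the reworked LLMComponent (not part of the diff; only the parameters visible in these hunks are shown, the prompt strings are placeholders, and other constructor arguments are assumed to keep their defaults):

    component = LLMComponent(
        model_id="deepseek-chat",  # resolved through the ModelManager registry at call time
        system_prompt="You are a helpful assistant.",
        prompt="{question}",
    )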
alayaflow-0.1.1/src/alayaflow/component/model/model_manager.py
@@ -0,0 +1,60 @@
+from typing import Dict, Optional, Any, Union
+from langchain_core.language_models import BaseChatModel
+from langchain_openai import ChatOpenAI
+
+from alayaflow.utils.singleton import SingletonMeta
+from alayaflow.component.model.schemas import ModelProfile, GenerationConfig
+
+
+class ModelManager(metaclass=SingletonMeta):
+    def __init__(self):
+        self._profiles: Dict[str, ModelProfile] = {}
+
+    def register_profile(self, profile: ModelProfile, override: bool = False) -> None:
+        if profile.model_id in self._profiles and not override:
+            raise ValueError(f"Profile with model ID {profile.model_id} already exists.")
+        self._profiles[profile.model_id] = profile
+
+    def get_model(
+        self,
+        model_id: str,
+        runtime_config: Optional[Union[GenerationConfig, Dict[str, Any]]] = None
+    ) -> BaseChatModel:
+        if model_id not in self._profiles:
+            raise ValueError(f"Model ID '{model_id}' not found. Please register it first.")
+
+        profile = self._profiles[model_id]
+
+        # Merge config
+
+        final_params = profile.default_config.model_dump(exclude={"extra_kwargs"})
+        extra_kwargs = profile.default_config.extra_kwargs.copy()
+
+        if runtime_config:
+            if isinstance(runtime_config, GenerationConfig):
+                override_dict = runtime_config.model_dump(exclude_unset=True, exclude={"extra_kwargs"})
+                final_params.update(override_dict)
+                extra_kwargs.update(runtime_config.extra_kwargs)
+            elif isinstance(runtime_config, dict):
+                final_params.update(runtime_config)
+
+        # Instantiate model
+
+        if profile.model_type == "OpenAI":
+            return ChatOpenAI(
+                model=profile.model_name,
+                openai_api_key=profile.api_key.get_secret_value(),
+                openai_api_base=profile.base_url,
+                # Generation parameters
+                temperature=final_params.get("temperature"),
+                max_tokens=final_params.get("max_tokens"),
+                top_p=final_params.get("top_p"),
+                frequency_penalty=final_params.get("frequency_penalty"),
+                presence_penalty=final_params.get("presence_penalty"),
+                model_kwargs=extra_kwargs
+            )
+        else:
+            raise ValueError(f"Unsupported model type: {profile.model_type}")
+
+    def get_profile(self, model_id: str) -> Optional[ModelProfile]:
+        return self._profiles.get(model_id)
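A hedged usage sketch for ModelManager (not part of the diff; profile values are placeholders):

    from alayaflow.component.model import ModelManager, ModelProfile

    manager = ModelManager()  # SingletonMeta: every call site shares one registry
    manager.register_profile(ModelProfile(
        name="DeepSeek Chat",
        model_id="deepseek-chat",
        model_name="deepseek-chat",
        base_url="https://api.deepseek.com/v1",
        api_key="sk-placeholder",
    ))
    # Per-call overrides are merged on top of the profile's default_config.
    llm = manager.get_model("deepseek-chat", runtime_config={"temperature": 0.2})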
alayaflow-0.1.1/src/alayaflow/component/model/schemas.py
@@ -0,0 +1,33 @@
+from typing import Optional
+from pydantic import BaseModel, Field, ConfigDict, SecretStr
+
+
+class GenerationConfig(BaseModel):
+    model_config = ConfigDict(populate_by_name=True)
+
+    temperature: float = Field(default=0.7, ge=0.0, le=2.0, description="Sampling temperature")
+    max_tokens: Optional[int] = Field(default=None, description="Maximum number of tokens to generate")
+    top_p: float = Field(default=1.0, description="Nucleus sampling threshold")
+    frequency_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+    presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+
+    extra_kwargs: dict = Field(default_factory=dict, description="Additional parameters")
+
+
+class ModelProfile(BaseModel):
+    model_config = ConfigDict(use_enum_values=True)
+
+    # Local used fields
+    name: str = Field(..., description="Model display name")
+    model_id: str = Field(..., description="Unique ID used internally by the system")
+    model_type: str = Field(default="OpenAI", description="Model type factory identifier")
+    provider_name: str = Field(default="Unknown", description="Provider name")
+
+    # Connection Credentials
+    model_name: str = Field(..., description="Vendor's model name")
+    base_url: str = Field(..., description="API Base URL")
+    api_key: SecretStr = Field(default=SecretStr(""), description="API Key")
+
+    # Default Generation Config
+    default_config: GenerationConfig = Field(default_factory=GenerationConfig)
+
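Note how these schemas interact with get_model's merge logic above: dumping a GenerationConfig override with exclude_unset=True keeps only the fields the caller explicitly set, so unset fields fall back to the profile defaults. A small sketch (not part of the diff):

    override = GenerationConfig(temperature=0.2)
    override.model_dump(exclude_unset=True, exclude={"extra_kwargs"})
    # -> {"temperature": 0.2}; top_p, penalties, etc. keep the profile's defaults.
    # A plain dict override, by contrast, updates exactly the keys it contains.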
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/retrieve_node.py
@@ -8,10 +8,4 @@ class RetrieveComponent:
 
     def __call__(self, query: str, collection_name: str, limit: int = 3) -> list[str]:
         result = self.client.vdb_query([query], limit, collection_name)
-        return result.get('documents', [[]])[0] if result.get('documents') else []
-
-if __name__ == "__main__":
-    client = HttpAlayaMemClient("http://10.16.70.46:5555")
-    res = client.vdb_query(messages="name", limit=5, collection_name="file_watcher_collection")
-
-    print(res)
+        return result.get('documents', [[]])[0] if result.get('documents') else []
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/component/web_search.py
@@ -111,16 +111,16 @@ def get_config_schema() -> Type[TypedDict]:
     return WFConfig
 
 
-
-
-
-
-
-
-
-
-
-
-    print(f"result: {result}")
+if __name__ == "__main__":
+    # Example usage
+    graph = create_graph(search_api_key="your-api-key-here")
+    result = graph.invoke({
+        "query": "search query",
+    }, config={
+        "configurable": WFConfig(
+            search_api_key="your-api-key-here",
+            search_url="https://your-search-api-url",
+        )
+    })
+    print(f"result: {result}")
 
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executor_manager.py
@@ -1,4 +1,4 @@
-from typing import 
+from typing import Dict, Generator, Any
 from enum import Enum
 
 from alayaflow.execution.executors.base_executor import BaseExecutor
@@ -43,10 +43,10 @@ class ExecutorManager:
         self,
         workflow_id: str,
         version: str,
-
-
+        inputs: dict,
+        context: dict,
         executor_type: ExecutorType | str = ExecutorType.NAIVE
-    ) -> Generator[Dict, None, None]:
+    ) -> Generator[Dict[str, Any], None, None]:
         if isinstance(executor_type, str):
             executor_type = ExecutorType(executor_type)
         if executor_type not in self._executor_map:
@@ -55,5 +55,5 @@ class ExecutorManager:
                 f"Supported kinds: {list(self._executor_map.keys())}"
             )
         executor = self._executor_map[executor_type]
-        yield from executor.execute_stream(workflow_id, version, 
+        yield from executor.execute_stream(workflow_id, version, inputs, context)
 
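exec_workflow now threads inputs and context through to the executor; executor_type still accepts the enum or its string value. A hedged call sketch (not part of the diff; the "naive" string is an assumed enum value):

    events = manager.exec_workflow(
        workflow_id="simple_chat",
        version="1.0.0",
        inputs={"messages": [{"role": "user", "content": "hi"}]},
        context={"user_id": "u1", "session_id": "s1", "chat_model_id": "deepseek-chat"},
        executor_type="naive",  # coerced via ExecutorType(...); the actual value is an assumption
    )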
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executors/base_executor.py
@@ -4,6 +4,6 @@ from typing import Generator, Dict
 
 class BaseExecutor(ABC):
     @abstractmethod
-    def execute_stream(self, workflow_id: str, version: str, 
+    def execute_stream(self, workflow_id: str, version: str, inputs: dict, context: dict) -> Generator[Dict, None, None]:
         pass
 
{alayaflow-0.1.0 → alayaflow-0.1.1}/src/alayaflow/execution/executors/naive_executor.py
@@ -36,8 +36,8 @@ class NaiveExecutor(BaseExecutor):
         self,
         workflow_id: str,
         version: str,
-
-
+        inputs: dict,
+        context: dict
     ) -> Generator[Dict, None, None]:
 
         # 1) resolve workflow
@@ -47,7 +47,7 @@ class NaiveExecutor(BaseExecutor):
             yield {"error": str(e), "workflow_id": workflow_id, "version": version}
             return
 
-        print(f"NaiveExecutor execute_stream: {workflow_id} {version} {
+        print(f"NaiveExecutor execute_stream: {workflow_id} {version} {inputs} {context}")
 
         # TODO: Support langflow workflow
         # Only support StateGraphRunnableWorkflow now.
@@ -60,7 +60,7 @@ class NaiveExecutor(BaseExecutor):
 
         def run_async_producer():
             try:
-                asyncio.run(self._produce_events_to_queue(runnable, 
+                asyncio.run(self._produce_events_to_queue(runnable, inputs, context, event_queue))
             except Exception as e:
                 event_queue.put({"error": str(e), "traceback": traceback.format_exc(), "workflow_id": workflow_id, "version": version})
             finally:
@@ -95,20 +95,18 @@ class NaiveExecutor(BaseExecutor):
                 yield self._serialize_event(item)
 
 
-    async def _produce_events_to_queue(self, runnable: BaseRunnableWorkflow, 
+    async def _produce_events_to_queue(self, runnable: BaseRunnableWorkflow, inputs: dict, context: dict, event_queue: queue.Queue):
        try:
            # Setup tracing
            tracing = get_tracing(settings)
            langfuse_cb = tracing.build_callback()
-
-            # Merge user_config and tracing config
-            merged_config = {
-                "configurable": user_config
-            }
+
            if langfuse_cb:
-
+                config = tracing.build_config(inputs, runnable.info, langfuse_cb)
+            else:
+                config = {}
 
-            async for chunk in runnable.
+            async for chunk in runnable.stream_events_async(inputs, context, config):
                event_queue.put(chunk)  # Put each event immediately (real-time)
        except Exception as e:
            # If execution fails, put error event in queue