alayaflow 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. alayaflow/__init__.py +5 -0
  2. alayaflow/api/__init__.py +5 -0
  3. alayaflow/api/api_singleton.py +81 -0
  4. alayaflow/clients/alayamem/base_client.py +19 -0
  5. alayaflow/clients/alayamem/http_client.py +64 -0
  6. alayaflow/common/config.py +106 -0
  7. alayaflow/component/__init__.py +0 -0
  8. alayaflow/component/chat_model.py +20 -0
  9. alayaflow/component/intent_classifier.py +94 -0
  10. alayaflow/component/langflow/__init__.py +0 -0
  11. alayaflow/component/langflow/intent_classifier.py +83 -0
  12. alayaflow/component/llm_node.py +123 -0
  13. alayaflow/component/memory.py +50 -0
  14. alayaflow/component/retrieve_node.py +17 -0
  15. alayaflow/component/web_search.py +126 -0
  16. alayaflow/execution/__init__.py +6 -0
  17. alayaflow/execution/env_manager.py +424 -0
  18. alayaflow/execution/executor_manager.py +59 -0
  19. alayaflow/execution/executors/__init__.py +9 -0
  20. alayaflow/execution/executors/base_executor.py +9 -0
  21. alayaflow/execution/executors/naive_executor.py +121 -0
  22. alayaflow/execution/executors/uv_executor.py +125 -0
  23. alayaflow/execution/executors/worker_executor.py +12 -0
  24. alayaflow/execution/langfuse_tracing.py +104 -0
  25. alayaflow/execution/workflow_runner.py +98 -0
  26. alayaflow/utils/singleton.py +14 -0
  27. alayaflow/workflow/__init__.py +6 -0
  28. alayaflow/workflow/runnable/__init__.py +7 -0
  29. alayaflow/workflow/runnable/base_runnable_workflow.py +19 -0
  30. alayaflow/workflow/runnable/state_graph_runnable_workflow.py +23 -0
  31. alayaflow/workflow/workflow_info.py +50 -0
  32. alayaflow/workflow/workflow_loader.py +168 -0
  33. alayaflow/workflow/workflow_manager.py +257 -0
  34. alayaflow-0.1.0.dist-info/METADATA +99 -0
  35. alayaflow-0.1.0.dist-info/RECORD +37 -0
  36. alayaflow-0.1.0.dist-info/WHEEL +4 -0
  37. alayaflow-0.1.0.dist-info/licenses/LICENSE +661 -0
alayaflow/__init__.py ADDED
@@ -0,0 +1,5 @@
1
"""AlayaFlow - A desktop platform for executing LangGraph workflows with multiple executors."""


# Package version; keep in sync with the wheel metadata (alayaflow-0.1.0).
__version__ = "0.1.0"
@@ -0,0 +1,5 @@
1
# Public API surface: expose the process-wide APISingleton under the
# friendlier name ``Flow``.
from .api_singleton import APISingleton as Flow

__all__ = [
    "Flow",
]
@@ -0,0 +1,81 @@
1
+ from typing import Generator, Dict, List, Self
2
+
3
+ from alayaflow.utils.singleton import SingletonMeta
4
+ from alayaflow.workflow import WorkflowManager
5
+ from alayaflow.execution import ExecutorManager, ExecutorType
6
+ from alayaflow.common.config import settings
7
+
8
+
9
class APISingleton(metaclass=SingletonMeta):
    """Process-wide facade over workflow management and execution.

    Wraps a :class:`WorkflowManager` and an :class:`ExecutorManager`;
    :meth:`init` must be called before any other public method.
    """

    def __init__(self) -> None:
        self.workflow_manager = WorkflowManager()
        self.executor_manager = ExecutorManager(
            workflow_manager=self.workflow_manager
        )
        # Flipped to True by init(); every public method checks it.
        self._inited = False

    def is_inited(self) -> bool:
        """Return True once :meth:`init` has been called."""
        return self._inited

    def _check_init(self) -> None:
        # Guard shared by all public operations.
        if not self._inited:
            raise ValueError("Flow APISingleton 未初始化,请先调用 init 方法")

    def init(self, config: dict | None = None) -> Self:
        """Apply *config* overrides to global settings and mark the singleton ready.

        Args:
            config: optional overrides for alayahub/langfuse settings;
                missing keys keep their current values.
        """
        # Bug fix: the mutable default ``config: dict = {}`` was replaced by
        # None to avoid the shared-mutable-default pitfall.
        config = config or {}
        # Overwrite all for now
        settings.alayahub_url = config.get("alayahub_url", settings.alayahub_url)
        settings.langfuse_enabled = config.get("langfuse_enabled", settings.langfuse_enabled)
        settings.langfuse_public_key = config.get("langfuse_public_key", settings.langfuse_public_key)
        settings.langfuse_secret_key = config.get("langfuse_secret_key", settings.langfuse_secret_key)
        settings.langfuse_url = config.get("langfuse_url", settings.langfuse_url)
        self._inited = True
        return self

    def list_loaded_workflow_ids(self) -> List[str]:
        """Return the IDs of workflows currently loaded in memory."""
        self._check_init()
        return self.workflow_manager.list_loaded_workflow_ids()

    def list_available_workflows(self) -> Dict[str, List[str]]:
        """Return workflows split into locally installed and remote-only sets."""
        self._check_init()
        return self.workflow_manager.list_available_workflows()

    def get_workflow_info(self, workflow_id: str) -> dict:
        """Return metadata for a single workflow."""
        self._check_init()
        return self.workflow_manager.get_workflow_info(workflow_id)

    def install_workflow(self, workflow_id: str) -> bool:
        """Install a workflow; returns True on success."""
        self._check_init()
        return self.workflow_manager.install_workflow(workflow_id)

    def uninstall_workflow(self, workflow_id: str) -> bool:
        """Uninstall a workflow; returns True on success."""
        self._check_init()
        return self.workflow_manager.uninstall_workflow(workflow_id)

    def load_workflow(self, workflow_id: str, version: str, init_args: dict | None = None):
        """Load a specific workflow version, optionally with init arguments.

        Bug fix: the mutable default ``init_args: dict = {}`` was replaced by
        None (same pitfall as in :meth:`init`); behavior is unchanged for
        callers that passed a dict or nothing.
        """
        self._check_init()
        return self.workflow_manager.load_workflow(workflow_id, version, init_args or {})

    def exec_workflow(
        self,
        workflow_id: str,
        version: str,
        input_data: dict,
        user_config: dict,
        executor_type: str | ExecutorType = ExecutorType.NAIVE
    ) -> Generator[dict, None, None]:
        """Execute a workflow and stream its events.

        Yields:
            dict events produced by the selected executor.
        """
        self._check_init()
        yield from self.executor_manager.exec_workflow(
            workflow_id=workflow_id,
            version=version,
            input_data=input_data,
            user_config=user_config,
            executor_type=executor_type
        )
@@ -0,0 +1,19 @@
1
from abc import ABC, abstractmethod
from typing import Dict, List, Any


class BaseAlayaMemClient(ABC):
    """Abstract interface for AlayaMem memory-service clients.

    Consistency fix: the abstract signatures previously disagreed with the
    reference implementation (``HttpAlayaMemClient``) and its callers in
    ``alayaflow/component/memory.py`` — e.g. ``init_session`` took a
    ``session_id`` instead of ``user_name``/``agent_name`` and ``vdb_query``
    lacked ``collection_name``.  They are now aligned.
    """

    @abstractmethod
    def init_session(self, user_id: str, user_name: str, agent_name: str) -> Dict:
        """Create/initialise a memory session for *user_id*."""

    @abstractmethod
    def vdb_query(self, messages: List[Any], limit: int, collection_name: str) -> Dict:
        """Run a vector-DB similarity query derived from *messages*."""

    @abstractmethod
    def add_session_messages(self, user_id: str, messages: List[Any]) -> None:
        """Persist *messages* for the user's session."""

    @abstractmethod
    def query(self, user_id: str, message: Any) -> Dict:
        """Answer *message* using stored memory for *user_id*."""
@@ -0,0 +1,64 @@
1
+ import requests
2
+ from typing import List, Any
3
+
4
+ from alayaflow.clients.alayamem.base_client import BaseAlayaMemClient
5
+
6
+
7
class HttpAlayaMemClient(BaseAlayaMemClient):
    """HTTP implementation of the AlayaMem memory-service client."""

    # Seconds before an HTTP request is aborted.  The original code passed no
    # timeout, so a hung server would block the caller indefinitely.
    _TIMEOUT = 30

    def __init__(self, base_url: str):
        self.base_url = base_url.rstrip("/")

    def init_session(self, user_id, user_name, agent_name):
        """Initialise a memory session for *user_id*.

        Bug fix: the original ignored ``user_name``/``agent_name`` and always
        sent the literal "unknown"; caller-supplied values are now forwarded.
        """
        return self._post("/init", {
            "user_id": user_id,
            "user_name": user_name,
            "agent_name": agent_name,
        })

    def vdb_query(self, messages: List[Any], limit: int, collection_name: str):
        """Similarity-search the vector DB, using the last message as query text."""
        query_text = self._extract_query(messages)
        return self._post("/vdb/query", {
            "collection_name": collection_name,
            "query_text": query_text,
            "limit": limit,
        })

    def query(self, user_id, message):
        """Ask the memory service to answer *message* for *user_id*."""
        return self._post("/query", {
            "user_id": user_id,
            "question": self._msg_content(message),
        })

    def add_session_messages(self, user_id, messages):
        """Persist every message in *messages*; returns one response per message.

        Bug fix: the original ``return`` statement sat inside the loop, so
        only the first message was ever sent to the service.
        """
        responses = []
        for msg in messages:
            responses.append(self._post("/add_message", {
                "user_id": user_id,
                "message": self._msg_content(msg),
                "is_user": self._is_user(msg),
                "speaker_name": "unknown",
            }))
        return responses

    # ---------- private helpers ----------

    def _post(self, path: str, payload: dict):
        """POST *payload* as JSON and return the decoded response body."""
        resp = requests.post(f"{self.base_url}{path}", json=payload, timeout=self._TIMEOUT)
        resp.raise_for_status()
        return resp.json()

    def _msg_content(self, msg):
        """Extract the text content from a dict- or object-style message."""
        # dict-style message
        if isinstance(msg, dict):
            return msg.get("content", str(msg))
        # object-style message (e.g. a LangChain message with a .content attr)
        return msg.content if hasattr(msg, "content") else str(msg)

    def _extract_query(self, messages):
        """Use the most recent message as the query text ('' when empty)."""
        if not messages:
            return ""
        last_msg = messages[-1]
        return self._msg_content(last_msg)

    def _is_user(self, msg):
        """Best-effort check whether *msg* came from the human user."""
        if isinstance(msg, dict):
            return msg.get("role", "").lower() == "user"
        # Heuristic on the class name (e.g. HumanMessage, UserMessage).
        return "Human" in msg.__class__.__name__ or "User" in msg.__class__.__name__
@@ -0,0 +1,106 @@
1
+ from pathlib import Path
2
+ from typing import Literal, Optional
3
+ from pydantic import model_validator
4
+ from pydantic_settings import BaseSettings, SettingsConfigDict
5
+
6
class Settings(BaseSettings):
    """Configuration class; every path can be overridden via ``.env``."""
    # Deployment mode:
    # - "dev": editable install (pip install -e); data lives under
    #   <project>/.alaya.ai/... so sources can be modified in place
    # - "dev-uneditable": standard install (pip install .) into site-packages
    # - "prod": released install (pip install alayaflow); uses ~/.alaya.ai/alayaflow
    dev_mode: Literal["dev", "dev-uneditable", "prod"] = "dev"

    def is_dev(self) -> bool:
        return self.dev_mode == "dev"

    def is_uneditable_dev(self) -> bool:
        return self.dev_mode == "dev-uneditable"

    def is_prod(self) -> bool:
        return self.dev_mode == "prod"

    # Filesystem layout.  Fields left as None are derived in
    # set_default_paths(); note agent_storage_path/app_storage_path/
    # resources_root are never defaulted there and may stay None.
    alayaflow_root: Optional[Path] = None
    workspace_root: Optional[Path] = None
    resources_root: Optional[Path] = None
    workflow_storage_path: Optional[Path] = None
    agent_storage_path: Optional[Path] = None
    app_storage_path: Optional[Path] = None

    alayahub_url: str = "http://localhost:8000"

    # Langfuse tracing configuration
    langfuse_enabled: bool = False
    langfuse_public_key: Optional[str] = ""
    langfuse_secret_key: Optional[str] = ""
    langfuse_url: Optional[str] = "http://100.64.0.34:4000/"

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore"
    )

    @property
    def cache_dir(self) -> Path:
        return self.workspace_root / "cache"

    @property
    def envs_dir(self) -> Path:
        return self.workspace_root / "envs"

    @model_validator(mode="after")
    def set_default_paths(self):
        """Derive any unset paths; runs before create_directories()."""
        current_file = Path(__file__).absolute()
        project_root = current_file.parent.parent.parent.parent

        # Heuristic: a project marker next to the package implies pip install -e.
        is_editable_install = (project_root / "pyproject.toml").exists() or (project_root / "setup.py").exists()

        if self.alayaflow_root is None:
            if self.is_dev() and is_editable_install:
                self.alayaflow_root = project_root
            else:
                self.alayaflow_root = current_file.parent.parent.parent
        else:
            # Normalise a user-supplied relative path to an absolute one.
            alayaflow_root_path = Path(self.alayaflow_root)
            if not alayaflow_root_path.is_absolute():
                # Relative paths are resolved against the current working directory.
                self.alayaflow_root = alayaflow_root_path.resolve()
            else:
                self.alayaflow_root = alayaflow_root_path

        if self.workspace_root is None:
            if self.is_dev() and is_editable_install:
                self.workspace_root = project_root / ".alaya.ai" / "alayaflow"
            else:
                self.workspace_root = Path.home() / ".alaya.ai" / "alayaflow"
        else:
            self.workspace_root = Path(self.workspace_root)

        if self.workflow_storage_path is None:
            self.workflow_storage_path = self.workspace_root / "workflows"
        else:
            self.workflow_storage_path = Path(self.workflow_storage_path)

        return self

    @model_validator(mode='after')
    def create_directories(self):
        """
        After the configuration is loaded (instantiated), make sure the
        required directory structure exists.
        """
        self.workspace_root.mkdir(parents=True, exist_ok=True)
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self.envs_dir.mkdir(parents=True, exist_ok=True)
        self.workflow_storage_path.mkdir(parents=True, exist_ok=True)

        return self

    @model_validator(mode='after')
    def print_config(self):
        """Print the effective configuration for debugging, secrets redacted."""
        dump = self.model_dump()
        # Security fix: the original printed the raw model dump, leaking
        # langfuse_secret_key to stdout/logs on every instantiation.
        if dump.get("langfuse_secret_key"):
            dump["langfuse_secret_key"] = "***"
        print(dump)
        return self

settings = Settings()
File without changes
@@ -0,0 +1,20 @@
1
+ import requests
2
+
3
+ from langchain_openai import ChatOpenAI
4
+
5
+ from alayaflow.common.config import settings
6
+
7
def mk_chat_model_deepseek():
    """Build a ChatOpenAI client pointed at the DeepSeek API.

    The API key is read from the ``DEEPSEEK_API_KEY`` environment variable.
    """
    import os

    # SECURITY FIX: a live API key was hard-coded in source.  It is kept only
    # as a backward-compatible fallback and must be rotated and removed.
    return ChatOpenAI(
        model="deepseek-chat",
        api_key=os.getenv("DEEPSEEK_API_KEY", "sk-4fe7cd96f5e948c79168025372e2327c"),
        base_url="https://api.deepseek.com/v1",
    )
13
+
14
def mk_chat_model_jet(base_url: str, model_name: str):
    """Build a ChatOpenAI client for a self-hosted, key-less endpoint."""
    client_kwargs = {
        "model": model_name,
        "api_key": "EMPTY",
        "base_url": base_url,
    }
    return ChatOpenAI(**client_kwargs)
@@ -0,0 +1,94 @@
1
+ from typing import List
2
+ from langchain_core.prompts import ChatPromptTemplate
3
+ from langchain_core.output_parsers import JsonOutputParser
4
+ from langchain_core.exceptions import OutputParserException
5
+
6
+ from typing import List, Dict, Any, TypedDict, Annotated, Optional
7
+ from langchain_core.language_models import BaseChatModel
8
+ from pydantic import BaseModel, Field
9
+
10
+
11
class IntentionResult(BaseModel):
    """
    Intent-classification result model: a normalised intent index plus the
    reasoning behind the choice.  Compatible with LangChain structured
    output / JSON parsers, with automatic type validation.
    """
    # Index into the caller-supplied ``intents`` list; equals
    # ``fallback_index`` (default -1) when classification falls back.
    classification_id: int = Field(
        ...,
        description="意图分类的索引ID,匹配输入intents列表的下标,兜底值通常为-1",
        gt=-2  # lower bound: the smallest valid index is -1 (the fallback)
    )

    reason: str = Field(
        ...,
        description="意图分类的依据说明,异常场景下为具体错误信息",
        min_length=1  # must not be an empty string
    )
28
+
29
# System-prompt template; ``{fallback_index}`` and ``{intents}`` are filled
# in by ``execute_intent_classification``.  The wording is intentionally
# strict about pure-JSON output.
DEFAULT_INTENT_PROMPT_TEMPLATE = """
你是专业的意图分类助手,需严格按照以下规则处理:
1. 从给定的选项中为用户输入匹配唯一的意图,返回对应的索引ID(fallback_index={fallback_index} 为兜底选项,仅当无匹配时使用)
2. 必须以 **纯JSON格式** 输出,字段为 classification_id(int类型)和 reason(str类型),无其他多余文本、注释
3. reason 需简洁说明分类依据,不超过50字

可用意图选项:
{intents}
"""
38
+
39
def execute_intent_classification(
    query: str,
    intents: List[str],
    model: BaseChatModel,
    fallback_index: int = -1
) -> IntentionResult:
    """Classify *query* against *intents* using *model*.

    Core business logic; works with any model that supports JSON output.
    On any failure the result carries ``fallback_index`` and an error
    description in ``reason`` — this function never raises.
    """
    import sys

    def _fallback(error_msg: str) -> IntentionResult:
        # Consolidated failure path (the original duplicated this block,
        # including a redundant ``import sys``, in both except arms).
        sys.stderr.write(f"Error during classification: {error_msg}\n")
        return IntentionResult(classification_id=fallback_index, reason=error_msg)

    if not intents:
        return IntentionResult(classification_id=fallback_index, reason='input intent list is empty')

    parser = JsonOutputParser(pydantic_object=IntentionResult)

    formatted_intents = "\n".join(f"选项{i}: {intent}" for i, intent in enumerate(intents))
    prompt = ChatPromptTemplate.from_template(DEFAULT_INTENT_PROMPT_TEMPLATE)
    full_prompt = prompt.format_prompt(
        fallback_index=fallback_index,
        intents=formatted_intents
    )

    user_prompt = f"### 用户输入\n{query}\n\n{parser.get_format_instructions()}"

    try:
        # response_format={"type": "json_object"} forces JSON output on
        # models that support it.
        response = model.invoke(
            [
                ("system", full_prompt.to_string()),
                ("user", user_prompt)
            ],
            response_format={"type": "json_object"}
        )
        result_dict = parser.parse(response.content)
        return IntentionResult(**result_dict)
    except OutputParserException as e:
        return _fallback(f"JSON解析失败,模型输出非标准格式:{str(e)}")
    except Exception as e:
        return _fallback(f"分类过程中发生错误:{str(e)}")
83
+
84
+
85
+ # def inner_test():
86
+ # from alayaflow.component.chat_model import mk_chat_model_deepseek
87
+ # model = mk_chat_model_deepseek()
88
+ # query = "你好"
89
+ # intents = ["问候", "查询"]
90
+ # result = execute_intent_classification(query, intents, model)
91
+ # print(result)
92
+
93
+ # if __name__ == "__main__":
94
+ # inner_test()
File without changes
@@ -0,0 +1,83 @@
1
+ from langflow.custom import Component
2
+ from langflow.io import MessageTextInput, IntInput, Output, MultilineInput
3
+ from langflow.schema import Data, Message
4
+
5
+ from alayaflow.component.intent_classifier import execute_intent_classification
6
+ from alayaflow.component.chat_model import mk_chat_model_deepseek
7
+
8
class IntentClassifierComponent(Component):
    """Langflow component wrapping ``execute_intent_classification``.

    Exposes the classification result both as structured ``Data`` (for
    downstream branching) and as a readable ``Message`` (for debugging).
    """
    display_name = "Intent Classifier"
    description = "基于 LLM 的意图分类器,输出分类 ID 和原因。"
    icon = "Split"
    name = "IntentClassifier"

    inputs = [
        # Input(
        #     name="llm",
        #     display_name="Language Model",
        #     info="请连接一个支持 Structured Output 的模型 (如 OpenAI)。",
        #     input_types=["LanguageModel"],  # allow connecting an LLM node
        #     required=True,
        # ),
        MessageTextInput(
            name="query",
            display_name="User Query",
            info="用户的输入文本",
            required=True,
        ),
        MultilineInput(
            name="intents_text",
            display_name="Intents List",
            info="每行输入一个意图描述。",
            value="查询订单状态\n取消订阅服务\n转人工客服",
        ),
        IntInput(
            name="fallback_index",
            display_name="Fallback Index",
            value=-1,
            advanced=True,
        ),
    ]

    outputs = [
        Output(
            name="result_data",
            display_name="Result (Data)",
            method="classify_to_data",
        ),
        Output(
            name="result_message",
            display_name="Result (Message)",
            method="classify_to_message",
        ),
    ]

    def classify_to_data(self) -> Data:
        """Run the classification and return structured data for later logic."""
        # 1. Gather inputs
        query = self.query
        # Split the multiline intents text into a list, dropping blank lines.
        intents = [line.strip() for line in self.intents_text.split('\n') if line.strip()]
        # model = self.llm
        # NOTE(review): the model is hard-coded (the ``llm`` input above is
        # commented out) — presumably a temporary shortcut; confirm before use.
        model = mk_chat_model_deepseek()
        fallback = self.fallback_index

        # 2. Delegate to the core classification logic.
        result = execute_intent_classification(query, intents, model, fallback)

        # Update the node status shown in the Langflow UI.
        self.status = f"Classified as ID: {result.classification_id}"

        # 3. Wrap the pydantic result as a Data object.
        return Data(data=result.model_dump())

    def classify_to_message(self) -> Message:
        """Run the classification and return a plain-text message for debugging."""
        data_output = self.classify_to_data()
        result = data_output.data

        text_output = (
            f"分类结果 ID: {result.get('classification_id', 'N/A')}\n"
            f"原因: {result.get('reason', 'N/A')}"
        )
        return Message(text=text_output)
@@ -0,0 +1,123 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from enum import IntEnum
5
+ from functools import cached_property
6
+ from typing import Any, Dict, Optional
7
+
8
+ from langchain_openai import ChatOpenAI
9
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, BaseMessage
10
+ from langchain_core.runnables import Runnable
11
+
12
+
13
class ResponseFormat(IntEnum):
    """Desired shape of the LLM response."""
    TEXT = 0
    JSON = 1


class LLMComponent:
    """One-shot LLM call with declarative configuration.

    The constructor arguments form the spec; calling the instance performs
    the request and returns an ``AIMessage``.  In JSON mode the output is
    validated with ``json.loads`` and, if it does not parse, retried once
    with a stricter system prompt.
    """

    def __init__(
        self,
        *,
        # ===== model & prompt =====
        model_name: str,
        system_prompt: str,
        prompt: str,

        # ===== sampling parameters =====
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_tokens: Optional[int] = None,

        # ===== output control =====
        response_format: ResponseFormat = ResponseFormat.TEXT,
        json_schema: Optional[Dict[str, Any]] = None,
        outputs: Optional[Dict[str, str]] = None,
        retry_json_once: bool = True,
    ):
        # Configuration doubles as the spec (plain member variables).
        self.model_name = model_name
        self.system_prompt = system_prompt
        self.prompt = prompt

        self.temperature = temperature
        self.top_p = top_p
        self.max_tokens = max_tokens

        self.response_format = response_format
        self.json_schema = json_schema
        self.outputs = outputs or {}
        self.retry_json_once = retry_json_once

    @cached_property
    def llm(self) -> Runnable:
        """Lazily constructed chat client (built once per instance)."""
        import os

        # SECURITY FIX: a live API key was hard-coded here.  It is now read
        # from the environment; the old literal remains only as a
        # backward-compatible fallback and must be rotated and removed.
        return ChatOpenAI(
            model=self.model_name,
            api_key=os.getenv("DEEPSEEK_API_KEY", "sk-4fe7cd96f5e948c79168025372e2327c"),
            base_url="https://api.deepseek.com/v1",
        )

    def _get_llm(self) -> Runnable:
        """Return the client with any configured sampling params bound."""
        llm = self.llm
        bind_kwargs: Dict[str, Any] = {}

        if self.temperature is not None:
            bind_kwargs["temperature"] = self.temperature
        if self.top_p is not None:
            bind_kwargs["top_p"] = self.top_p
        if self.max_tokens is not None:
            bind_kwargs["max_tokens"] = self.max_tokens

        if bind_kwargs:
            try:
                llm = llm.bind(**bind_kwargs)
            except Exception:
                # Best-effort: if bind() is unsupported, fall back to the
                # unbound client rather than failing the whole call.
                pass

        return llm

    def _invoke(self, system_prompt: str) -> AIMessage:
        """Send system + user prompt; normalise the reply to an AIMessage."""
        llm = self._get_llm()
        resp = llm.invoke(
            [
                SystemMessage(content=system_prompt),
                HumanMessage(content=self.prompt),
            ]
        )

        if isinstance(resp, AIMessage):
            return resp

        # Some runnables return other BaseMessage subclasses; re-wrap them.
        if isinstance(resp, BaseMessage):
            return AIMessage(
                content=getattr(resp, "content", str(resp)),
                additional_kwargs=getattr(resp, "additional_kwargs", {}) or {},
                response_metadata=getattr(resp, "response_metadata", {}) or {},
            )

        return AIMessage(content=str(resp))

    def __call__(self) -> AIMessage:
        """Execute the configured call and return the model's reply."""
        if self.response_format == ResponseFormat.TEXT:
            return self._invoke(self.system_prompt)

        json_system = (
            self.system_prompt
            + "\n\n【输出格式要求】\n"
            "你必须只输出严格合法的 JSON。\n"
            "不要输出任何解释、前后缀、Markdown 代码块或多余文本。\n"
        )

        msg = self._invoke(json_system)

        try:
            json.loads(msg.content)
            return msg
        except Exception:
            if not self.retry_json_once:
                return msg

            # Single retry with an even stricter instruction.
            strict_system = (
                self.system_prompt
                + "\n\n【严格输出格式要求】\n"
                "只输出严格合法的 JSON(json.loads 必须可解析)。\n"
                "禁止输出任何非 JSON 字符。\n"
            )
            return self._invoke(strict_system)
@@ -0,0 +1,50 @@
1
+ from alayaflow.clients.alayamem.http_client import HttpAlayaMemClient
2
+
3
+
4
def query_vdb_message(alayamem_url: str, messages, limit, collection_name = "file_watcher_collection"):
    """Query the vector DB via AlayaMem; best-effort (errors become a payload)."""
    client = HttpAlayaMemClient(alayamem_url)
    try:
        response = client.vdb_query(messages, limit, collection_name)
    except Exception as exc:
        return {"vdb_results": [], "error": str(exc)}
    return {"vdb_results": response.get("results", []), "vdb_response": response}
11
+
12
+
13
def init_memory(alayamem_url: str, user_id, user_name="unknown", agent_name="unknown"):
    """Initialise an AlayaMem session for *user_id*; best-effort.

    Returns the service response, or ``{"memory_initialized": False}`` on failure.
    """
    mem_client = HttpAlayaMemClient(alayamem_url)
    try:
        return mem_client.init_session(user_id, user_name, agent_name)
    except Exception as e:
        # Consistency fix: the exception was previously swallowed with ``e``
        # unused; log it like the sibling helpers do before falling back.
        print(f"[Init Memory] Error: {e}")
        return {"memory_initialized": False}
19
+
20
+
21
def query_message(alayamem_url: str, user_id, messages):
    """Query stored memory using the latest message.

    Returns the service response, ``[]`` on error, or None when *messages*
    is empty.
    """
    client = HttpAlayaMemClient(alayamem_url)

    if not messages:
        return

    latest = messages[-1]

    try:
        return client.query(user_id, latest)
    except Exception as exc:
        print(f"[Query User Message] Error: {exc}")
        return []
36
+
37
+
38
def add_message(alayamem_url: str, user_id, session_id, messages):
    """Persist *messages* via AlayaMem; best-effort (errors are printed).

    NOTE(review): ``session_id`` is accepted but never forwarded — confirm
    whether the backend needs it.
    """
    client = HttpAlayaMemClient(alayamem_url)

    if not messages:
        return

    try:
        return client.add_session_messages(user_id, messages)
    except Exception as exc:
        print(f"[Add Messages] Error: {exc}")