noesium-0.1.0-py3-none-any.whl
- noesium/core/__init__.py +4 -0
- noesium/core/agent/__init__.py +14 -0
- noesium/core/agent/base.py +227 -0
- noesium/core/consts.py +6 -0
- noesium/core/goalith/conflict/conflict.py +104 -0
- noesium/core/goalith/conflict/detector.py +53 -0
- noesium/core/goalith/decomposer/__init__.py +6 -0
- noesium/core/goalith/decomposer/base.py +46 -0
- noesium/core/goalith/decomposer/callable_decomposer.py +65 -0
- noesium/core/goalith/decomposer/llm_decomposer.py +326 -0
- noesium/core/goalith/decomposer/prompts.py +140 -0
- noesium/core/goalith/decomposer/simple_decomposer.py +61 -0
- noesium/core/goalith/errors.py +22 -0
- noesium/core/goalith/goalgraph/graph.py +526 -0
- noesium/core/goalith/goalgraph/node.py +179 -0
- noesium/core/goalith/replanner/base.py +31 -0
- noesium/core/goalith/replanner/replanner.py +36 -0
- noesium/core/goalith/service.py +26 -0
- noesium/core/llm/__init__.py +154 -0
- noesium/core/llm/base.py +152 -0
- noesium/core/llm/litellm.py +528 -0
- noesium/core/llm/llamacpp.py +487 -0
- noesium/core/llm/message.py +184 -0
- noesium/core/llm/ollama.py +459 -0
- noesium/core/llm/openai.py +520 -0
- noesium/core/llm/openrouter.py +89 -0
- noesium/core/llm/prompt.py +551 -0
- noesium/core/memory/__init__.py +11 -0
- noesium/core/memory/base.py +464 -0
- noesium/core/memory/memu/__init__.py +24 -0
- noesium/core/memory/memu/config/__init__.py +26 -0
- noesium/core/memory/memu/config/activity/config.py +46 -0
- noesium/core/memory/memu/config/event/config.py +46 -0
- noesium/core/memory/memu/config/markdown_config.py +241 -0
- noesium/core/memory/memu/config/profile/config.py +48 -0
- noesium/core/memory/memu/llm_adapter.py +129 -0
- noesium/core/memory/memu/memory/__init__.py +31 -0
- noesium/core/memory/memu/memory/actions/__init__.py +40 -0
- noesium/core/memory/memu/memory/actions/add_activity_memory.py +299 -0
- noesium/core/memory/memu/memory/actions/base_action.py +342 -0
- noesium/core/memory/memu/memory/actions/cluster_memories.py +262 -0
- noesium/core/memory/memu/memory/actions/generate_suggestions.py +198 -0
- noesium/core/memory/memu/memory/actions/get_available_categories.py +66 -0
- noesium/core/memory/memu/memory/actions/link_related_memories.py +515 -0
- noesium/core/memory/memu/memory/actions/run_theory_of_mind.py +254 -0
- noesium/core/memory/memu/memory/actions/update_memory_with_suggestions.py +514 -0
- noesium/core/memory/memu/memory/embeddings.py +130 -0
- noesium/core/memory/memu/memory/file_manager.py +306 -0
- noesium/core/memory/memu/memory/memory_agent.py +578 -0
- noesium/core/memory/memu/memory/recall_agent.py +376 -0
- noesium/core/memory/memu/memory_store.py +628 -0
- noesium/core/memory/models.py +149 -0
- noesium/core/msgbus/__init__.py +12 -0
- noesium/core/msgbus/base.py +395 -0
- noesium/core/orchestrix/__init__.py +0 -0
- noesium/core/py.typed +0 -0
- noesium/core/routing/__init__.py +20 -0
- noesium/core/routing/base.py +66 -0
- noesium/core/routing/router.py +241 -0
- noesium/core/routing/strategies/__init__.py +9 -0
- noesium/core/routing/strategies/dynamic_complexity.py +361 -0
- noesium/core/routing/strategies/self_assessment.py +147 -0
- noesium/core/routing/types.py +38 -0
- noesium/core/toolify/__init__.py +39 -0
- noesium/core/toolify/base.py +360 -0
- noesium/core/toolify/config.py +138 -0
- noesium/core/toolify/mcp_integration.py +275 -0
- noesium/core/toolify/registry.py +214 -0
- noesium/core/toolify/toolkits/__init__.py +1 -0
- noesium/core/tracing/__init__.py +37 -0
- noesium/core/tracing/langgraph_hooks.py +308 -0
- noesium/core/tracing/opik_tracing.py +144 -0
- noesium/core/tracing/token_tracker.py +166 -0
- noesium/core/utils/__init__.py +10 -0
- noesium/core/utils/logging.py +172 -0
- noesium/core/utils/statistics.py +12 -0
- noesium/core/utils/typing.py +17 -0
- noesium/core/vector_store/__init__.py +79 -0
- noesium/core/vector_store/base.py +94 -0
- noesium/core/vector_store/pgvector.py +304 -0
- noesium/core/vector_store/weaviate.py +383 -0
- noesium-0.1.0.dist-info/METADATA +525 -0
- noesium-0.1.0.dist-info/RECORD +86 -0
- noesium-0.1.0.dist-info/WHEEL +5 -0
- noesium-0.1.0.dist-info/licenses/LICENSE +21 -0
- noesium-0.1.0.dist-info/top_level.txt +1 -0

noesium/core/memory/memu/config/markdown_config.py
@@ -0,0 +1,241 @@
+"""
+MemU Markdown Configuration System - YAML based configuration
+
+All file type configurations are centralized in a single YAML file to make
+maintenance and debugging easier. Each item also points to its prompt file
+under its corresponding folder when available.
+"""
+
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import yaml
+
+
+@dataclass
+class MarkdownFileConfig:
+    """Minimal markdown file configuration"""
+
+    name: str  # File type name
+    filename: str  # Filename
+    description: str  # File description
+    folder_path: str  # Folder path
+    prompt_path: str  # Prompt file path
+    context: str = "rag"  # Context mode: "all" means put entire content in context, "rag" means use RAG search
+    rag_length: int = 50  # RAG length, -1 means all, other values mean number of lines
+
+
+class MarkdownConfigManager:
+    """Manager for loading markdown file configurations from YAML."""
+
+    def __init__(self):
+        self.config_base_dir = Path(__file__).parent
+        self.yaml_path = self.config_base_dir / "memory_cat_config.yaml"
+        self._files_config: Dict[str, MarkdownFileConfig] = {}
+        self._processing_order: List[str] = []
+        self._load_all_configs()
+
+    def _load_all_configs(self) -> None:
+        """Load all configurations from YAML file.
+
+        Supports the following YAML structure:
+
+        categories:
+          system: [ {name, filename, description, folder_name, prompt_file, context, rag_length}, ... ]
+          custom: [ ... ]
+
+        Both system and custom lists are concatenated to form the basic categories.
+        """
+        self._files_config = {}
+
+        if not self.yaml_path.exists():
+            print(f"Warning: YAML configuration file not found: {self.yaml_path}")
+            self._processing_order = []
+            return
+
+        try:
+            raw_config: Dict[str, Any] = yaml.safe_load(self.yaml_path.read_text(encoding="utf-8")) or {}
+        except Exception as e:
+            print(f"Warning: Failed to parse YAML configuration {self.yaml_path}: {e}")
+            self._processing_order = []
+            return
+
+        categories_section: Dict[str, Any] = raw_config.get("categories", {}) or {}
+        system_list = categories_section.get("system", []) or []
+        custom_list = categories_section.get("custom", []) or []
+
+        # Maintain order: system first, then custom
+        combined_items: List[Dict[str, Any]] = []
+        if isinstance(system_list, list):
+            combined_items.extend(system_list)
+        if isinstance(custom_list, list):
+            combined_items.extend(custom_list)
+
+        processing_order: List[str] = []
+
+        for item in combined_items:
+            if not isinstance(item, dict):
+                continue
+            name: Optional[str] = item.get("name")
+            if not name:
+                # Skip invalid entries without name
+                continue
+
+            # Compute folder and prompt paths
+            folder_name: str = item.get("folder_name", name)
+            folder_path = self.config_base_dir / folder_name
+            prompt_file_name: str = item.get("prompt_file", "prompt.txt")
+            prompt_path = folder_path / prompt_file_name
+
+            self._files_config[name] = MarkdownFileConfig(
+                name=name,
+                filename=item.get("filename", f"{name}.md"),
+                description=item.get("description", ""),
+                folder_path=str(folder_path),
+                prompt_path=str(prompt_path) if prompt_path.exists() else "",
+                context=item.get("context", "rag"),
+                rag_length=int(item.get("rag_length", 50)),
+            )
+            processing_order.append(name)
+
+        self._processing_order = processing_order
+
+    def get_file_config(self, file_type: str) -> Optional[MarkdownFileConfig]:
+        """Get configuration for specified file type"""
+        return self._files_config.get(file_type)
+
+    def get_all_file_types(self) -> List[str]:
+        """Get all supported file types"""
+        return list(self._files_config.keys())
+
+    def get_processing_order(self) -> List[str]:
+        """Get processing order"""
+        return self._processing_order.copy()
+
+    def get_file_description(self, file_type: str) -> str:
+        """Get description of file type"""
+        config = self.get_file_config(file_type)
+        return config.description if config else ""
+
+    def validate_file_type(self, file_type: str) -> bool:
+        """Validate if file type is supported"""
+        return file_type in self._files_config
+
+    def get_prompt_path(self, file_type: str) -> str:
+        """Get prompt file path"""
+        config = self.get_file_config(file_type)
+        return config.prompt_path if config else ""
+
+    def get_folder_path(self, file_type: str) -> str:
+        """Get folder path"""
+        config = self.get_file_config(file_type)
+        return config.folder_path if config else ""
+
+    def get_file_types_mapping(self) -> Dict[str, str]:
+        """Get mapping from file type to filename"""
+        return {name: config.filename for name, config in self._files_config.items()}
+
+    def get_context_mode(self, file_type: str) -> str:
+        """Get context mode for specified file type"""
+        config = self.get_file_config(file_type)
+        return config.context if config else "rag"
+
+    def get_rag_length(self, file_type: str) -> int:
+        """Get RAG length for specified file type"""
+        config = self.get_file_config(file_type)
+        return config.rag_length if config else 50
+
+    def get_all_context_configs(self) -> Dict[str, Dict[str, Any]]:
+        """Get context configurations for all file types"""
+        return {
+            file_type: {"context": config.context, "rag_length": config.rag_length}
+            for file_type, config in self._files_config.items()
+        }
+
+
+# Global configuration manager instance
+_config_manager = None
+
+
+def get_config_manager() -> MarkdownConfigManager:
+    """Get configuration manager instance"""
+    global _config_manager
+    if _config_manager is None:
+        _config_manager = MarkdownConfigManager()
+    return _config_manager
+
+
+# Maintain backward compatible API functions
+
+
+def detect_file_type(filename: str, content: str = "") -> str:
+    """Intelligently detect file type based on filename"""
+    manager = get_config_manager()
+    file_types = manager.get_all_file_types()
+
+    if not file_types:
+        return "activity"
+
+    # Detect based on filename keywords
+    filename_lower = filename.lower()
+
+    # Detect profile type
+    if any(keyword in filename_lower for keyword in ["profile", "personal_info", "bio", "resume"]):
+        if "profile" in file_types:
+            return "profile"
+
+    # Detect event type
+    if any(keyword in filename_lower for keyword in ["event", "events", "activity", "milestone"]):
+        if "event" in file_types:
+            return "event"
+
+    # Detect activity type
+    if any(keyword in filename_lower for keyword in ["activity", "activities", "daily", "diary", "log"]):
+        if "activity" in file_types:
+            return "activity"
+
+    # If no match, return first available type
+    return file_types[0]
+
+
+def get_required_files() -> List[str]:
+    """Get list of required file types"""
+    manager = get_config_manager()
+    return manager.get_all_file_types()
+
+
+def get_optional_files() -> List[str]:
+    """Get list of optional file types (currently empty)"""
+    return []
+
+
+def get_simple_summary() -> Dict[str, Any]:
+    """Get configuration summary"""
+    manager = get_config_manager()
+    file_types = manager.get_all_file_types()
+
+    required_files = {}
+    for file_type in file_types:
+        config = manager.get_file_config(file_type)
+        if config:
+            required_files[file_type] = {
+                "filename": config.filename,
+                "description": config.description,
+                "folder": config.folder_path,
+                "context": config.context,
+                "rag_length": config.rag_length,
+            }
+
+    return {
+        "required_files": required_files,
+        "optional_files": {},
+        "total_files": len(file_types),
+        "processing_principle": f"Load {len(file_types)} categories from YAML (system + custom)",
+    }
+
+
+def get_all_file_configs() -> Dict[str, MarkdownFileConfig]:
+    """Get all file configurations"""
+    manager = get_config_manager()
+    return {file_type: manager.get_file_config(file_type) for file_type in manager.get_all_file_types()}
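
For orientation, here is a minimal usage sketch of the loader above (illustrative, not part of the package diff). It assumes the package is installed and that a `memory_cat_config.yaml` with the `categories: {system: [...], custom: [...]}` layout described in the `_load_all_configs` docstring sits next to `markdown_config.py`; the file types named in the comments are assumptions about that YAML, not facts from the diff.

```python
from noesium.core.memory.memu.config.markdown_config import (
    detect_file_type,
    get_config_manager,
)

manager = get_config_manager()        # module-level singleton; parses the YAML once
print(manager.get_all_file_types())   # e.g. ["profile", "event", "activity"], depending on the YAML
print(manager.get_context_mode("profile"))  # falls back to "rag" if the type is not configured

# "diary"/"log" keywords map to the "activity" type when that type is configured;
# otherwise the first configured type (or "activity" on an empty config) is returned.
print(detect_file_type("2024_diary_log.md"))
```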

noesium/core/memory/memu/config/profile/config.py
@@ -0,0 +1,48 @@
+"""
+Profile type configuration file
+Record character's basic personal information only (age, location, background, etc.) - NO EVENTS
+"""
+
+from dataclasses import dataclass
+
+
+@dataclass
+class ProfileConfig:
+    """Profile file configuration"""
+
+    # Basic information
+    name: str = "profile"
+    filename: str = "profile.md"
+    description: str = (
+        "Record character's basic personal information only (age, location, background, demographics) - excludes events and activities"
+    )
+
+    # Folder path
+    folder_name: str = "profile"
+    prompt_file: str = "prompt.txt"
+    config_file: str = "config.py"
+
+    # RAG configuration
+    context: str = "all"  # "all" means put entire content in context, "rag" means use RAG search only
+    rag_length: int = -1  # RAG length, -1 means all, other values mean number of lines
+
+
+# Create configuration instance
+CONFIG = ProfileConfig()
+
+
+def get_config():
+    """Get profile configuration"""
+    return CONFIG
+
+
+def get_file_info():
+    """Get file information"""
+    return {
+        "name": CONFIG.name,
+        "filename": CONFIG.filename,
+        "description": CONFIG.description,
+        "folder": CONFIG.folder_name,
+        "context": CONFIG.context,
+        "rag_length": CONFIG.rag_length,
+    }
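
As a quick check of the defaults above (an illustrative snippet, not part of the diff): `get_file_info()` simply serializes `CONFIG`, so the profile category is injected whole into context (`context="all"`) with no line limit (`rag_length=-1`).

```python
from noesium.core.memory.memu.config.profile.config import get_config, get_file_info

cfg = get_config()
assert cfg.context == "all" and cfg.rag_length == -1  # whole-file context, no RAG line cap

info = get_file_info()
print(info["filename"], info["folder"])  # "profile.md" "profile"
```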

noesium/core/memory/memu/llm_adapter.py
@@ -0,0 +1,129 @@
+from typing import Any, Dict, List
+
+from noesium.core.utils.logging import get_logger
+
+logger = get_logger(__name__)
+
+from noesium.core.llm import BaseLLMClient, get_llm_client
+
+################################################################################
+# MemU agent compatibility
+################################################################################
+
+
+class MemoryLLMAdapter:
+    """Adapter to make noesium LLM clients compatible with memory agent system"""
+
+    def __init__(self, llm_client: BaseLLMClient):
+        """Initialize with the original LLM client"""
+        self.llm_client = llm_client
+
+    def simple_chat(self, message: str) -> str:
+        """
+        Simple chat method that wraps the completion method
+
+        Args:
+            message: The message to send to the LLM
+
+        Returns:
+            str: The LLM response
+        """
+        try:
+            # Convert single message to messages format
+            messages = [{"role": "user", "content": message}]
+
+            # Call the completion method
+            response = self.llm_client.completion(messages)
+
+            # Return the response as string
+            return str(response)
+
+        except Exception as e:
+            logger.error(f"Error in simple_chat: {e}")
+            raise
+
+    def chat_completion(self, messages: List[Dict[str, str]], tools=None, tool_choice=None, **kwargs) -> Any:
+        """
+        Chat completion method for automated memory processing
+
+        Args:
+            messages: List of message dictionaries
+            tools: Optional tools for function calling
+            tool_choice: Tool choice strategy
+            **kwargs: Additional arguments
+
+        Returns:
+            Mock response object for memory agent compatibility
+        """
+        try:
+            # For now, call the regular completion method
+            # In a full implementation, this would handle tool calls properly
+            response_text = self.llm_client.completion(messages, **kwargs)
+
+            # Create a mock response object that the memory agent expects
+            class MockResponse:
+                def __init__(self, content, success=True):
+                    self.success = success
+                    self.content = content
+                    self.tool_calls = []  # No function calling in this simplified version
+                    self.error = None if success else "Mock error"
+
+            return MockResponse(str(response_text))
+
+        except Exception as e:
+            logger.error(f"Error in chat_completion: {e}")
+
+            class MockResponse:
+                def __init__(self, error_msg):
+                    self.success = False
+                    self.content = ""
+                    self.tool_calls = []
+                    self.error = error_msg
+
+            return MockResponse(str(e))
+
+    def embed(self, text: str) -> List[float]:
+        """
+        Generate embeddings for text using the underlying LLM client
+
+        Args:
+            text: Text to embed
+
+        Returns:
+            List[float]: Embedding vector
+        """
+        return self.llm_client.embed(text)
+
+    def embed_batch(self, texts: List[str]) -> List[List[float]]:
+        """
+        Generate embeddings for multiple texts using the underlying LLM client
+
+        Args:
+            texts: List of texts to embed
+
+        Returns:
+            List[List[float]]: List of embedding vectors
+        """
+        return self.llm_client.embed_batch(texts)
+
+    def get_embedding_dimensions(self) -> int:
+        """
+        Get the embedding dimensions from the underlying LLM client
+
+        Returns:
+            int: Embedding dimensions
+        """
+        return self.llm_client.get_embedding_dimensions()
+
+
+def _get_llm_client_memu_compatible(**kwargs) -> BaseLLMClient:
+    """
+    Get an LLM client with optional MemU system compatibility
+
+    Args:
+        **kwargs: Additional arguments to pass to the LLM client
+
+    Returns:
+        BaseLLMClient: Configured LLM client
+    """
+    return MemoryLLMAdapter(get_llm_client(structured_output=True, **kwargs))
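
A minimal sketch of wiring the adapter above around a noesium LLM client (illustrative, not part of the diff). It assumes `get_llm_client(structured_output=True)` can resolve a provider from the surrounding configuration or environment; note that `chat_completion` ignores `tools`/`tool_choice` and always returns a mock response with an empty `tool_calls` list.

```python
from noesium.core.llm import get_llm_client
from noesium.core.memory.memu.llm_adapter import MemoryLLMAdapter

adapter = MemoryLLMAdapter(get_llm_client(structured_output=True))  # wrap any BaseLLMClient

reply = adapter.simple_chat("Summarize today's conversation in one sentence.")

response = adapter.chat_completion([{"role": "user", "content": "Hello"}])
print(response.success, response.tool_calls)  # True, [] (no function calling in this adapter)

vector = adapter.embed("memory text")  # delegates to llm_client.embed
print(len(vector), adapter.get_embedding_dimensions())  # expected to match for a well-behaved client
```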

noesium/core/memory/memu/memory/__init__.py
@@ -0,0 +1,31 @@
+"""
+MemU Memory Module - Function Calling Architecture
+
+Modern memory system with function calling interface:
+
+CORE ARCHITECTURE:
+- MemoryAgent: Function calling interface for LLM agents (main interface)
+- RecallAgent: File system operations and content retrieval
+
+STORAGE:
+- MemoryFileManager: File operations for memory storage (.md files)
+- EmbeddingClient: Vector embedding generation for semantic search
+
+WORKFLOW:
+1. LLM Agent → Function Calling → Memory Operations → Markdown Files
+2. Memory stored with embeddings for semantic retrieval (per-line basis)
+3. RecallAgent provides file system scanning and content search capabilities
+4. All memory operations exposed as standardized function calls
+"""
+
+from .embeddings import get_default_embedding_client
+from .file_manager import MemoryFileManager
+from .memory_agent import MemoryAgent
+from .recall_agent import RecallAgent
+
+__all__ = [
+    "MemoryAgent",  # Function calling interface
+    "RecallAgent",
+    "MemoryFileManager",
+    "get_default_embedding_client",
+]

noesium/core/memory/memu/memory/actions/__init__.py
@@ -0,0 +1,40 @@
+"""
+Memory Actions Module
+
+Individual action implementations for memory operations.
+Each action is a standalone module that can be loaded dynamically.
+"""
+
+# Import all actions
+from .add_activity_memory import AddActivityMemoryAction
+from .base_action import BaseAction
+from .cluster_memories import ClusterMemoriesAction
+from .generate_suggestions import GenerateMemorySuggestionsAction
+
+# from .get_available_categories import GetAvailableCategoriesAction
+from .link_related_memories import LinkRelatedMemoriesAction
+from .run_theory_of_mind import RunTheoryOfMindAction
+from .update_memory_with_suggestions import UpdateMemoryWithSuggestionsAction
+
+# Registry of all available actions
+ACTION_REGISTRY = {
+    "add_activity_memory": AddActivityMemoryAction,
+    # "get_available_categories": GetAvailableCategoriesAction,
+    "link_related_memories": LinkRelatedMemoriesAction,
+    "generate_memory_suggestions": GenerateMemorySuggestionsAction,
+    "update_memory_with_suggestions": UpdateMemoryWithSuggestionsAction,
+    "run_theory_of_mind": RunTheoryOfMindAction,
+    "cluster_memories": ClusterMemoriesAction,
+}
+
+__all__ = [
+    "BaseAction",
+    "ACTION_REGISTRY",
+    "AddActivityMemoryAction",
+    # "GetAvailableCategoriesAction",
+    "LinkRelatedMemoriesAction",
+    "GenerateMemorySuggestionsAction",
+    "UpdateMemoryWithSuggestionsAction",
+    "RunTheoryOfMindAction",
+    "ClusterMemoriesAction",
+]
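
The registry above lets actions be resolved by name. A small illustrative sketch of that lookup (not part of the diff): constructor signatures for the action classes live in `base_action.py` and the individual action modules, which are not shown in this excerpt, so only class resolution is demonstrated, and the `BaseAction` subclass check is an assumption about that hierarchy.

```python
from noesium.core.memory.memu.memory.actions import ACTION_REGISTRY, BaseAction

# Iterate the registry the way a dispatcher would: key -> action class.
for action_name, action_cls in ACTION_REGISTRY.items():
    assert issubclass(action_cls, BaseAction)  # assumed: every registered action derives from BaseAction
    print(f"{action_name} -> {action_cls.__name__}")

# Resolve a single action class by its registry key.
add_activity_cls = ACTION_REGISTRY["add_activity_memory"]
```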