noesium-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- noesium/core/__init__.py +4 -0
- noesium/core/agent/__init__.py +14 -0
- noesium/core/agent/base.py +227 -0
- noesium/core/consts.py +6 -0
- noesium/core/goalith/conflict/conflict.py +104 -0
- noesium/core/goalith/conflict/detector.py +53 -0
- noesium/core/goalith/decomposer/__init__.py +6 -0
- noesium/core/goalith/decomposer/base.py +46 -0
- noesium/core/goalith/decomposer/callable_decomposer.py +65 -0
- noesium/core/goalith/decomposer/llm_decomposer.py +326 -0
- noesium/core/goalith/decomposer/prompts.py +140 -0
- noesium/core/goalith/decomposer/simple_decomposer.py +61 -0
- noesium/core/goalith/errors.py +22 -0
- noesium/core/goalith/goalgraph/graph.py +526 -0
- noesium/core/goalith/goalgraph/node.py +179 -0
- noesium/core/goalith/replanner/base.py +31 -0
- noesium/core/goalith/replanner/replanner.py +36 -0
- noesium/core/goalith/service.py +26 -0
- noesium/core/llm/__init__.py +154 -0
- noesium/core/llm/base.py +152 -0
- noesium/core/llm/litellm.py +528 -0
- noesium/core/llm/llamacpp.py +487 -0
- noesium/core/llm/message.py +184 -0
- noesium/core/llm/ollama.py +459 -0
- noesium/core/llm/openai.py +520 -0
- noesium/core/llm/openrouter.py +89 -0
- noesium/core/llm/prompt.py +551 -0
- noesium/core/memory/__init__.py +11 -0
- noesium/core/memory/base.py +464 -0
- noesium/core/memory/memu/__init__.py +24 -0
- noesium/core/memory/memu/config/__init__.py +26 -0
- noesium/core/memory/memu/config/activity/config.py +46 -0
- noesium/core/memory/memu/config/event/config.py +46 -0
- noesium/core/memory/memu/config/markdown_config.py +241 -0
- noesium/core/memory/memu/config/profile/config.py +48 -0
- noesium/core/memory/memu/llm_adapter.py +129 -0
- noesium/core/memory/memu/memory/__init__.py +31 -0
- noesium/core/memory/memu/memory/actions/__init__.py +40 -0
- noesium/core/memory/memu/memory/actions/add_activity_memory.py +299 -0
- noesium/core/memory/memu/memory/actions/base_action.py +342 -0
- noesium/core/memory/memu/memory/actions/cluster_memories.py +262 -0
- noesium/core/memory/memu/memory/actions/generate_suggestions.py +198 -0
- noesium/core/memory/memu/memory/actions/get_available_categories.py +66 -0
- noesium/core/memory/memu/memory/actions/link_related_memories.py +515 -0
- noesium/core/memory/memu/memory/actions/run_theory_of_mind.py +254 -0
- noesium/core/memory/memu/memory/actions/update_memory_with_suggestions.py +514 -0
- noesium/core/memory/memu/memory/embeddings.py +130 -0
- noesium/core/memory/memu/memory/file_manager.py +306 -0
- noesium/core/memory/memu/memory/memory_agent.py +578 -0
- noesium/core/memory/memu/memory/recall_agent.py +376 -0
- noesium/core/memory/memu/memory_store.py +628 -0
- noesium/core/memory/models.py +149 -0
- noesium/core/msgbus/__init__.py +12 -0
- noesium/core/msgbus/base.py +395 -0
- noesium/core/orchestrix/__init__.py +0 -0
- noesium/core/py.typed +0 -0
- noesium/core/routing/__init__.py +20 -0
- noesium/core/routing/base.py +66 -0
- noesium/core/routing/router.py +241 -0
- noesium/core/routing/strategies/__init__.py +9 -0
- noesium/core/routing/strategies/dynamic_complexity.py +361 -0
- noesium/core/routing/strategies/self_assessment.py +147 -0
- noesium/core/routing/types.py +38 -0
- noesium/core/toolify/__init__.py +39 -0
- noesium/core/toolify/base.py +360 -0
- noesium/core/toolify/config.py +138 -0
- noesium/core/toolify/mcp_integration.py +275 -0
- noesium/core/toolify/registry.py +214 -0
- noesium/core/toolify/toolkits/__init__.py +1 -0
- noesium/core/tracing/__init__.py +37 -0
- noesium/core/tracing/langgraph_hooks.py +308 -0
- noesium/core/tracing/opik_tracing.py +144 -0
- noesium/core/tracing/token_tracker.py +166 -0
- noesium/core/utils/__init__.py +10 -0
- noesium/core/utils/logging.py +172 -0
- noesium/core/utils/statistics.py +12 -0
- noesium/core/utils/typing.py +17 -0
- noesium/core/vector_store/__init__.py +79 -0
- noesium/core/vector_store/base.py +94 -0
- noesium/core/vector_store/pgvector.py +304 -0
- noesium/core/vector_store/weaviate.py +383 -0
- noesium-0.1.0.dist-info/METADATA +525 -0
- noesium-0.1.0.dist-info/RECORD +86 -0
- noesium-0.1.0.dist-info/WHEEL +5 -0
- noesium-0.1.0.dist-info/licenses/LICENSE +21 -0
- noesium-0.1.0.dist-info/top_level.txt +1 -0
noesium/core/memory/memu/memory/actions/add_activity_memory.py
@@ -0,0 +1,299 @@
"""
Add Activity Memory Action

Adds new activity memory content with strict no-pronouns formatting, following the same
high-quality standards as update_memory_with_suggestions for self-contained memory items.
"""

import json
import logging
from datetime import datetime
from typing import Any, Dict

from .base_action import BaseAction

logger = logging.getLogger(__name__)


class AddActivityMemoryAction(BaseAction):
    """
    Action to add new activity memory content with strict formatting requirements

    Ensures all memory items are complete, self-contained sentences with no pronouns,
    following the same standards as update_memory_with_suggestions.
    """

    @property
    def action_name(self) -> str:
        return "add_activity_memory"

    def get_schema(self) -> Dict[str, Any]:
        """Return OpenAI-compatible function schema"""
        return {
            "name": "add_activity_memory",
            "description": "Add new activity memory content with strict no-pronouns formatting for complete, self-contained memory items",
            "parameters": {
                "type": "object",
                "properties": {
                    "character_name": {
                        "type": "string",
                        "description": "Name of the character",
                    },
                    "content": {
                        "type": "string",
                        "description": "Complete original conversation text exactly as provided - do NOT modify, extract, or summarize",
                    },
                    "session_date": {
                        "type": "string",
                        "description": "Date of the session (e.g., '2024-01-15')",
                        "default": None,
                    },
                    "generate_embeddings": {
                        "type": "boolean",
                        "description": "Whether to generate embeddings for semantic search",
                        "default": True,
                    },
                },
                "required": ["character_name", "content"],
            },
        }

    def execute(
        self,
        character_name: str,
        content: str,
        session_date: str = None,
        generate_embeddings: bool = True,
    ) -> Dict[str, Any]:
        """
        Execute add activity memory operation with strict formatting

        Args:
            character_name: Name of the character
            content: Raw content to process and format
            session_date: Date of the session
            generate_embeddings: Whether to generate embeddings for the content

        Returns:
            Dict containing operation result including formatted content and embedding info
        """
        try:
            # Use current date if not provided
            if not session_date:
                session_date = datetime.now().strftime("%Y-%m-%d")

            # Process raw content through LLM to ensure strict formatting
            formatted_content = self._format_content_with_llm(character_name, content, session_date)

            if not formatted_content.strip():
                return self._add_metadata({"success": False, "error": "LLM returned empty formatted content"})

            # Add memory IDs with timestamp to the formatted content
            memory_items, content_with_ids = self._add_memory_ids_with_timestamp(formatted_content, session_date)

            # Save first, then add embedding for just the new content
            success = self._append_memory_content(character_name, "activity", content_with_ids)

            # Save content with embeddings if enabled
            if success and generate_embeddings and self.embeddings_enabled:
                self._add_memory_item_embedding(character_name, "activity", memory_items)

            if success:
                return self._add_metadata(
                    {
                        "success": True,
                        "character_name": character_name,
                        "category": "activity",
                        "session_date": session_date,
                        "memory_items_added": len(memory_items),
                        "memory_items": memory_items,
                        "message": f"Successfully generated {len(memory_items)} self-contained activity memory items for {character_name}",
                    }
                )
            else:
                return self._add_metadata({"success": False, "error": "Failed to save activity memory"})

        except Exception as e:
            return self._handle_error(e)

    def _format_content_with_llm(self, character_name: str, content: str, session_date: str) -> str:
        """Use LLM to format content with meaningful activity grouping"""

        user_name = character_name

        # Create enhanced prompt for meaningful activity grouping
        format_prompt = f"""You are formatting activity memory content for {user_name} on {session_date}.

Raw content to format:
{content}

**CRITICAL REQUIREMENT: GROUP RELATED CONTENT INTO MEANINGFUL ACTIVITIES**

Transform this raw content into properly formatted activity memory items following these rules:

**MEANINGFUL ACTIVITY GROUPING REQUIREMENTS:**
- Group related sentences/statements into single, comprehensive activity descriptions
- Each activity should be a complete, self-contained description of what happened
- Combine related dialogue, actions, and context into cohesive activity blocks
- Only create separate items for genuinely different activities or topics
- Each activity item should tell a complete "story" or "event"

**SELF-CONTAINED MEMORY REQUIREMENTS:**
- EVERY activity item must be complete and standalone
- ALWAYS include the full subject (do not use "she/he/they/it")
- NEVER use pronouns that depend on context (no "she", "he", "they", "it")
- Include specific names, places, dates, and full context in each item
- Each activity should be understandable without reading other items
- Include all relevant details, emotions, and outcomes in the activity description

**FORMAT REQUIREMENTS:**
1. Each line = one complete, meaningful activity (may include multiple related sentences)
2. NO markdown headers, bullets, numbers, or structure
3. Write in plain text only
4. Focus on comprehensive, meaningful activity descriptions
5. Use specific names, titles, places, and dates
6. Each line ends with a period

**GOOD EXAMPLES (meaningful activities, one per line):**
{character_name} attended a LGBTQ support group where {character_name} heard inspiring transgender stories and felt happy, thankful, accepted, and gained courage to embrace {character_name}'s true self.
{character_name} discussed future career plans with Melanie, expressing keen interest in counseling and mental health work to support people with similar issues, and Melanie encouraged {character_name} saying {character_name} would be a great counselor due to {character_name}'s empathy and understanding.
{character_name} admired Melanie's lake sunrise painting from last year, complimented the color blending, and discussed how painting serves as a great outlet for expressing feelings and relaxing after long days.

**BAD EXAMPLES (too fragmented):**
{character_name} went to a LGBTQ support group.
{character_name} heard transgender stories.
{character_name} felt happy and thankful.
{character_name} gained courage to embrace {character_name}'s true self.

**ACTIVITY GROUPING GUIDELINES:**
- Conversations about the same topic → Single activity
- Related actions and their outcomes → Single activity
- Emotional reactions to specific events → Include in the main activity
- Sequential related events → Single comprehensive activity
- Different topics or unrelated events → Separate activities

**QUALITY STANDARDS:**
- Never use "he", "she", "they", "it" - always use the person's actual name
- Never use "the book", "the place", "the friend" - always include full titles and names
- Each activity must be complete and tell the full story
- Include emotional context, outcomes, and significance
- Merge related content intelligently to create meaningful activity summaries

Transform the raw content into properly formatted activity memory items (ONE MEANINGFUL ACTIVITY PER LINE):

"""

        # Call LLM to format content
        cleaned_content = self.llm_client.simple_chat(format_prompt)

        return cleaned_content

    def _add_memory_ids_with_timestamp(self, content: str, session_date: str) -> tuple[list[dict], str]:
        """
        Add memory IDs with timestamp to content lines
        Format: [memory_id][mentioned at {session_date}] {content}

        Args:
            content: Raw content
            session_date: Date of the session

        Returns:
            Content with memory IDs and timestamps added to each line
        """
        if not content.strip():
            return content

        lines = content.split("\n")
        processed_items = []
        plain_memory_lines = []

        for line in lines:
            line = line.strip()
            if line:  # Only process non-empty lines
                # Generate new unique memory ID for this line
                memory_id = self._generate_memory_id()
                # Format: [memory_id][mentioned at {session_date}] {content} [links]
                processed_items.append(
                    {
                        "memory_id": memory_id,
                        "mentioned_at": session_date,
                        "content": line,
                        "links": "",
                    }
                )
                plain_memory_lines.append(f"[{memory_id}][mentioned at {session_date}] {line} []")

        plain_memory_text = "\n".join(plain_memory_lines)

        return processed_items, plain_memory_text

    def _add_memory_item_embedding(self, character_name: str, category: str, new_items: list[dict]) -> Dict[str, Any]:
        """Add embedding for new memory items"""
        try:
            if not self.embeddings_enabled or not new_items:
                return {"success": False, "error": "Embeddings disabled or empty item"}

            # Get character embeddings directory from storage manager
            char_embeddings_dir = self.storage_manager.get_char_embeddings_dir()
            embeddings_file = char_embeddings_dir / f"{category}_embeddings.json"

            existing_embeddings = []
            if embeddings_file.exists():
                with open(embeddings_file, "r", encoding="utf-8") as f:
                    embeddings_data = json.load(f)
                    existing_embeddings = embeddings_data.get("embeddings", [])

            # Generate embeddings for new items
            for item in new_items:
                if not item["content"].strip():
                    continue

                try:
                    embedding_vector = self.embedding_client.embed(item["content"])

                    new_item_id = f"{character_name}_{category}_item_{len(existing_embeddings)}"

                    new_embedding = {
                        "item_id": new_item_id,
                        "memory_id": item["memory_id"],
                        "text": item["content"],
                        "full_line": f"[{item['memory_id']}][mentioned at {item['mentioned_at']}] {item['content']} [{item['links']}]",
                        "embedding": embedding_vector,
                        "line_number": len(existing_embeddings) + 1,
                        "metadata": {
                            "character": character_name,
                            "category": category,
                            "length": len(item["content"]),
                            "mentioned_at": item["mentioned_at"],
                            "timestamp": datetime.now().isoformat(),
                        },
                    }

                    # Add to existing embeddings
                    existing_embeddings.append(new_embedding)

                except Exception as e:
                    logger.warning(f"Failed to generate embedding for memory item {item.get('memory_id')}: {repr(e)}")
                    continue

            # Save updated embeddings
            embeddings_data = {
                "category": category,
                "timestamp": datetime.now().isoformat(),
                "embeddings": existing_embeddings,
                "total_embeddings": len(existing_embeddings),
            }

            with open(embeddings_file, "w", encoding="utf-8") as f:
                json.dump(embeddings_data, f, indent=2, ensure_ascii=False)

            return {
                "success": True,
                "embedding_count": len(existing_embeddings),
                "new_items_count": len(new_items),
                "message": f"Added embeddings for {len(new_items)} new memory items in {category}",
            }

        except Exception as e:
            logger.error(f"Failed to add memory item embedding: {e}")
            return {"success": False, "error": str(e)}
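The `[memory_id][mentioned at {session_date}] {content} [links]` line format that `_add_memory_ids_with_timestamp` emits can be reproduced in isolation. The sketch below is illustrative only (the sample text, date, and function name are not from the package); it mirrors the six-character UUID ids produced by `BaseAction._generate_memory_id`.

```python
# Standalone sketch of the activity-memory line format (not part of noesium).
import uuid


def format_activity_lines(formatted_content: str, session_date: str) -> list[str]:
    """One tagged line per non-empty input line, with an empty [links] slot."""
    tagged = []
    for line in formatted_content.split("\n"):
        line = line.strip()
        if line:
            memory_id = str(uuid.uuid4())[:6]  # same short-id scheme as _generate_memory_id
            tagged.append(f"[{memory_id}][mentioned at {session_date}] {line} []")
    return tagged


# Example (id is random, date is illustrative):
# format_activity_lines("Alice attended a pottery class with Sam.", "2024-01-15")
# -> ['[3f9a1c][mentioned at 2024-01-15] Alice attended a pottery class with Sam. []']
```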
noesium/core/memory/memu/memory/actions/base_action.py
@@ -0,0 +1,342 @@
"""
Base Action Class for Memory Operations

Defines the interface and common functionality for all memory actions.
"""

import logging
import re
import uuid
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, Dict, Tuple

logger = logging.getLogger(__name__)


class BaseAction(ABC):
    """
    Base class for all memory actions

    Defines the standard interface that all actions must implement:
    - get_schema(): Return OpenAI-compatible function schema
    - execute(**kwargs): Execute the action with given arguments
    - validate_arguments(): Validate input arguments
    """

    def __init__(self, memory_core):
        """
        Initialize action with memory core

        Args:
            memory_core: Core memory functionality (file manager, embeddings, config, etc.)
        """
        self.memory_core = memory_core
        self.llm_client = memory_core.llm_client
        self.storage_manager = memory_core.storage_manager
        self.embedding_client = memory_core.embedding_client
        self.embeddings_enabled = memory_core.embeddings_enabled
        self.config_manager = memory_core.config_manager
        self.memory_types = memory_core.memory_types
        self.basic_memory_types = memory_core.memory_types["basic"]
        self.processing_order = memory_core.processing_order
        # self.embeddings_dir = memory_core.embeddings_dir

    @property
    @abstractmethod
    def action_name(self) -> str:
        """Return the name of this action"""

    @abstractmethod
    def get_schema(self) -> Dict[str, Any]:
        """
        Return OpenAI-compatible function schema for this action

        Returns:
            Dict containing function schema with name, description, and parameters
        """

    @abstractmethod
    def execute(self, **kwargs) -> Dict[str, Any]:
        """
        Execute the action with provided arguments

        Args:
            **kwargs: Action-specific arguments

        Returns:
            Dict containing execution result with success status and data
        """

    def validate_arguments(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """
        Validate input arguments against schema

        Args:
            arguments: Arguments to validate

        Returns:
            Dict with validation result
        """
        try:
            schema = self.get_schema()
            required_params = schema["parameters"].get("required", [])

            # Check for missing required parameters
            missing_params = [param for param in required_params if param not in arguments]

            if missing_params:
                return {
                    "valid": False,
                    "error": f"Missing required parameters: {missing_params}",
                    "required_parameters": required_params,
                }

            return {
                "valid": True,
                "message": f"Validation passed for {self.action_name}",
            }

        except Exception as e:
            return {"valid": False, "error": f"Validation error: {str(e)}"}

    def _add_metadata(self, result: Dict[str, Any]) -> Dict[str, Any]:
        """Add standard metadata to action result"""
        if isinstance(result, dict):
            result["action_name"] = self.action_name
            result["timestamp"] = datetime.now().isoformat()
        return result

    def _handle_error(self, error: Exception) -> Dict[str, Any]:
        """Standard error handling for actions"""
        error_result = {
            "success": False,
            "error": str(error),
            "action_name": self.action_name,
            "timestamp": datetime.now().isoformat(),
        }
        logger.error(f"Action {self.action_name} failed: {error}")
        return error_result

    # ================================
    # Memory ID Utilities
    # ================================

    def _generate_memory_id(self) -> str:
        short_uuid = str(uuid.uuid4())[:6]
        return f"{short_uuid}"

    def _add_memory_ids_to_content(self, content: str) -> str:
        """
        Add memory IDs to content lines

        Args:
            content: Raw content

        Returns:
            Content with memory IDs added to each line
        """
        if not content.strip():
            return content

        lines = content.split("\n")
        processed_lines = []

        for line in lines:
            line = line.strip()
            if line:  # Only process non-empty lines
                # Always remove existing memory ID and generate a new unique one
                if self._has_memory_id(line):
                    # Extract content without memory ID
                    _, clean_content = self._extract_memory_id(line)
                    line = clean_content

                # Generate new unique memory ID for this line
                memory_id = self._generate_memory_id()
                processed_lines.append(f"[{memory_id}] {line}")
            else:
                # Keep empty lines as is
                processed_lines.append("")

        return "\n".join(processed_lines)

    def _has_memory_id(self, line: str) -> bool:
        """
        Check if a line already has a memory ID

        Args:
            line: Line to check

        Returns:
            True if line starts with [memory_id] format
        """
        pattern = r"^\[[\w\d_]+\]\s+"
        return bool(re.match(pattern, line.strip()))

    def _extract_memory_id(self, line: str) -> Tuple[str, str]:
        """
        Extract memory ID and content from a line

        Args:
            line: Line with memory ID format: [memory_id] content

        Returns:
            Tuple of (memory_id, content)
        """
        line = line.strip()
        pattern = r"^\[([\w\d_]+)\]\s*(.*)"
        match = re.match(pattern, line)

        if match:
            memory_id = match.group(1)
            content = match.group(2)
            return memory_id, content
        else:
            # If no memory ID found, return empty ID and full line as content
            return "", line

    def _extract_content_without_ids(self, content: str) -> str:
        """
        Extract pure content without memory IDs for embedding generation

        Args:
            content: Content with memory IDs

        Returns:
            Content without memory IDs
        """
        if not content.strip():
            return content

        lines = content.split("\n")
        clean_lines = []

        for line in lines:
            if line.strip():
                _, clean_content = self._extract_memory_id(line)
                if clean_content:
                    clean_lines.append(clean_content)
            else:
                clean_lines.append("")

        return "\n".join(clean_lines)

    def _parse_memory_items(self, content: str) -> list[dict[str, Any]]:
        """
        Parse content into memory items with IDs, supporting both old and new timestamp formats

        Args:
            content: Content with memory IDs

        Returns:
            List of memory items with metadata
        """
        if not content.strip():
            return []

        lines = content.split("\n")
        items = []

        for i, line in enumerate(lines):
            line = line.strip()
            if line:  # Only process non-empty lines
                memory_id, mentioned_at, clean_content, links = self._extract_timestamped_memory_item(line)

                if clean_content:
                    item = {
                        "memory_id": memory_id,
                        "mentioned_at": mentioned_at,
                        "content": clean_content,
                        "links": links,
                        "full_line": line,
                        "line_number": i + 1,
                    }
                    items.append(item)

        return items

    def _extract_timestamped_memory_item(self, line: str) -> Tuple[str, str, str, str]:
        """
        Extract memory ID, content, timestamp, and links from timestamped format
        Format: [memory_id][mentioned at date] content [links]

        Args:
            line: Line with timestamped memory format

        Returns:
            Tuple of (memory_id, content, mentioned_at, links)
        """
        import re

        line = line.strip()

        # Pattern to match: [memory_id][mentioned at date] content [links] (links optional)
        pattern = r"^\[([^\]]+)\]\[mentioned at ([^\]]+)\]\s*(.*?)(?:\s*\[([^\]]*)\])?$"
        match = re.match(pattern, line)

        if match:
            memory_id = match.group(1)
            mentioned_at = match.group(2)
            content = match.group(3).strip()
            links = match.group(4) if match.group(4) else ""
            return memory_id, mentioned_at, content, links
        else:
            return "", "", "", ""

    # ================================
    # Common utility methods that actions can use
    # ================================

    def _load_existing_memory(self, character_name: str) -> Dict[str, str]:
        """Load existing memory content for all categories"""
        existing_memory = {}

        for category in self.storage_manager.get_flat_memory_types():
            try:
                content = self._read_memory_content(character_name, category)
                existing_memory[category] = content if isinstance(content, str) else ""
            except Exception as e:
                logger.warning(f"Failed to load existing {category} for {character_name}: {e}")
                existing_memory[category] = ""

        return existing_memory

    def _read_memory_content(self, character_name: str, category: str) -> str:
        """Read memory content from storage"""
        try:
            # agent_id and user_id are managed inside storage_manager
            return self.storage_manager.read_memory_file(category)
        except Exception as e:
            logger.warning(f"Failed to read {category} for {character_name}: {e}")
            return ""

    def _save_memory_content(self, character_name: str, category: str, content: str) -> bool:
        """Save memory content to storage"""
        try:
            # agent_id and user_id are managed inside storage_manager
            return self.storage_manager.write_memory_file(category, content)
        except Exception as e:
            logger.error(f"Failed to save {category} for {character_name}: {e}")
            return False

    def _append_memory_content(self, character_name: str, category: str, content: str) -> bool:
        """Append memory content to storage"""
        try:
            # agent_id and user_id are managed inside storage_manager
            return self.storage_manager.append_memory_file(category, content)
        except Exception as e:
            logger.error(f"Failed to append {category} for {character_name}: {e}")
            return False

    def _convert_conversation_to_text(self, conversation: list[dict]) -> str:
        """Convert conversation list to text format for LLM processing"""
        if not conversation or not isinstance(conversation, list):
            return ""

        text_parts = []
        for message in conversation:
            role = message.get("role", "unknown")
            content = message.get("content", "")
            text_parts.append(f"{role.upper()}: {content}")

        return "\n".join(text_parts)