neuro-simulator 0.0.4-py3-none-any.whl → 0.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuro_simulator/__init__.py +10 -1
- neuro_simulator/agent/__init__.py +8 -0
- neuro_simulator/agent/api.py +737 -0
- neuro_simulator/agent/core.py +471 -0
- neuro_simulator/agent/llm.py +104 -0
- neuro_simulator/agent/memory/__init__.py +4 -0
- neuro_simulator/agent/memory/manager.py +370 -0
- neuro_simulator/agent/memory.py +137 -0
- neuro_simulator/agent/tools/__init__.py +4 -0
- neuro_simulator/agent/tools/core.py +112 -0
- neuro_simulator/agent/tools.py +69 -0
- neuro_simulator/builtin_agent.py +83 -0
- neuro_simulator/cli.py +45 -0
- neuro_simulator/config.py +217 -79
- neuro_simulator/config.yaml.example +16 -2
- neuro_simulator/letta.py +71 -45
- neuro_simulator/log_handler.py +30 -16
- neuro_simulator/main.py +167 -30
- neuro_simulator/process_manager.py +5 -2
- neuro_simulator/stream_manager.py +6 -0
- {neuro_simulator-0.0.4.dist-info → neuro_simulator-0.1.2.dist-info}/METADATA +1 -1
- neuro_simulator-0.1.2.dist-info/RECORD +31 -0
- neuro_simulator-0.0.4.dist-info/RECORD +0 -20
- {neuro_simulator-0.0.4.dist-info → neuro_simulator-0.1.2.dist-info}/WHEEL +0 -0
- {neuro_simulator-0.0.4.dist-info → neuro_simulator-0.1.2.dist-info}/entry_points.txt +0 -0
- {neuro_simulator-0.0.4.dist-info → neuro_simulator-0.1.2.dist-info}/top_level.txt +0 -0
neuro_simulator/agent/memory/manager.py
@@ -0,0 +1,370 @@
+# agent/memory/manager.py
+"""
+Advanced memory management for the Neuro Simulator Agent
+"""
+
+import os
+import json
+import asyncio
+from typing import Dict, List, Any, Optional
+from datetime import datetime
+import random
+import string
+import sys
+
+
+def generate_id(length=6) -> str:
+    """Generate a random ID string"""
+    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
+
+
+class MemoryManager:
+    """Manages different types of memory for the agent"""
+
+    def __init__(self, working_dir: str = None):
+        # Use provided working directory or default to current directory
+        if working_dir is None:
+            working_dir = os.getcwd()
+
+        self.memory_dir = os.path.join(working_dir, "agent", "memory")
+        os.makedirs(self.memory_dir, exist_ok=True)
+
+        # Memory file paths
+        self.init_memory_file = os.path.join(self.memory_dir, "init_memory.json")
+        self.core_memory_file = os.path.join(self.memory_dir, "core_memory.json")
+        self.context_file = os.path.join(self.memory_dir, "context.json")  # new context file
+        self.temp_memory_file = os.path.join(self.memory_dir, "temp_memory.json")
+
+        # In-memory storage
+        self.init_memory: Dict[str, Any] = {}
+        self.core_memory: Dict[str, Any] = {}
+        self.context_history: List[Dict[str, Any]] = []  # new context history
+        self.temp_memory: List[Dict[str, Any]] = []  # truly temporary memory
+
+    async def initialize(self):
+        """Load all memory types from files"""
+        # Load init memory (immutable by agent)
+        if os.path.exists(self.init_memory_file):
+            with open(self.init_memory_file, 'r', encoding='utf-8') as f:
+                self.init_memory = json.load(f)
+        else:
+            # Default init memory - this is just an example, users can customize
+            self.init_memory = {
+                "name": "Neuro-Sama",
+                "role": "AI VTuber",
+                "personality": "Friendly, curious, and entertaining",
+                "capabilities": [
+                    "Chat with viewers",
+                    "Answer questions",
+                    "Entertain audience",
+                    "Express opinions"
+                ]
+            }
+            await self._save_init_memory()
+
+        # Load core memory (mutable by both agent and user)
+        if os.path.exists(self.core_memory_file):
+            with open(self.core_memory_file, 'r', encoding='utf-8') as f:
+                self.core_memory = json.load(f)
+        else:
+            # Default core memory with blocks
+            self.core_memory = {
+                "blocks": {
+                    "general_knowledge": {
+                        "id": "general_knowledge",
+                        "title": "General Knowledge",
+                        "description": "Basic facts and knowledge about the world",
+                        "content": [
+                            "The earth is round",
+                            "Water boils at 100°C at sea level",
+                            "Humans need oxygen to survive"
+                        ]
+                    },
+                    "stream_info": {
+                        "id": "stream_info",
+                        "title": "Stream Information",
+                        "description": "Information about this stream and Neuro-Sama",
+                        "content": [
+                            "This is a simulation of Neuro-Sama, an AI VTuber",
+                            "The stream is meant for entertainment and experimentation",
+                            "Viewers can interact with Neuro-Sama through chat"
+                        ]
+                    }
+                }
+            }
+            await self._save_core_memory()
+
+        # Load context history
+        if os.path.exists(self.context_file):
+            with open(self.context_file, 'r', encoding='utf-8') as f:
+                self.context_history = json.load(f)
+        else:
+            self.context_history = []
+
+        # Load temp memory (frequently changed by agent)
+        if os.path.exists(self.temp_memory_file):
+            with open(self.temp_memory_file, 'r', encoding='utf-8') as f:
+                self.temp_memory = json.load(f)
+
+        print("Memory manager initialized with all memory types")
+
+    async def _save_init_memory(self):
+        """Save init memory to file"""
+        with open(self.init_memory_file, 'w', encoding='utf-8') as f:
+            json.dump(self.init_memory, f, ensure_ascii=False, indent=2)
+
+    async def update_init_memory(self, new_memory: Dict[str, Any]):
+        """Update init memory with new values"""
+        self.init_memory.update(new_memory)
+        await self._save_init_memory()
+
+    async def _save_core_memory(self):
+        """Save core memory to file"""
+        with open(self.core_memory_file, 'w', encoding='utf-8') as f:
+            json.dump(self.core_memory, f, ensure_ascii=False, indent=2)
+
+    async def _save_context(self):
+        """Save context to file"""
+        with open(self.context_file, 'w', encoding='utf-8') as f:
+            json.dump(self.context_history, f, ensure_ascii=False, indent=2)
+
+    async def _save_temp_memory(self):
+        """Save temp memory to file"""
+        with open(self.temp_memory_file, 'w', encoding='utf-8') as f:
+            json.dump(self.temp_memory, f, ensure_ascii=False, indent=2)
+
+    async def add_context_entry(self, role: str, content: str):
+        """Add an entry to context"""
+        entry = {
+            "id": generate_id(),
+            "role": role,  # "user" or "assistant"
+            "content": content,
+            "timestamp": datetime.now().isoformat()
+        }
+        self.context_history.append(entry)
+
+        # Keep only last 20 context entries (10 rounds)
+        if len(self.context_history) > 20:
+            self.context_history = self.context_history[-20:]
+
+        await self._save_context()
+
+    async def add_detailed_context_entry(self, input_messages: List[Dict[str, str]],
+                                         prompt: str, llm_response: str,
+                                         tool_executions: List[Dict[str, Any]],
+                                         final_response: str,
+                                         entry_id: str = None):
+        """Add or update a detailed context entry with full LLM interaction details"""
+        # Check if we're updating an existing entry
+        if entry_id:
+            # Find the entry with the given ID and update it
+            for entry in self.context_history:
+                if entry.get("id") == entry_id:
+                    entry.update({
+                        "input_messages": input_messages,
+                        "prompt": prompt,
+                        "llm_response": llm_response,
+                        "tool_executions": tool_executions,
+                        "final_response": final_response,
+                        "timestamp": datetime.now().isoformat()
+                    })
+                    await self._save_context()
+                    return entry_id
+
+        # If no entry_id was provided or the entry wasn't found, create a new one
+        entry = {
+            "id": entry_id or generate_id(),
+            "type": "llm_interaction",
+            "role": "assistant",  # Add role for llm_interaction entries
+            "input_messages": input_messages,
+            "prompt": prompt,
+            "llm_response": llm_response,
+            "tool_executions": tool_executions,
+            "final_response": final_response,
+            "timestamp": datetime.now().isoformat()
+        }
+        self.context_history.append(entry)
+
+        # Keep only last 20 context entries
+        if len(self.context_history) > 20:
+            self.context_history = self.context_history[-20:]
+
+        await self._save_context()
+        return entry["id"]
+
+    async def get_recent_context(self, rounds: int = 5) -> List[Dict[str, Any]]:
+        """Get recent context (default: last 5 rounds, 10 entries)"""
+        # Each round consists of user message and assistant response
+        entries_needed = rounds * 2
+        return self.context_history[-entries_needed:] if self.context_history else []
+
+    async def get_detailed_context_history(self) -> List[Dict[str, Any]]:
+        """Get the full detailed context history"""
+        return self.context_history
+
+    async def get_last_agent_response(self) -> Optional[str]:
+        """Get the last response from the agent"""
+        for entry in reversed(self.context_history):
+            if entry.get("role") == "assistant":
+                return entry.get("content")
+            elif entry.get("type") == "llm_interaction":
+                return entry.get("final_response")
+        return None
+
+    async def reset_context(self):
+        """Reset context"""
+        self.context_history = []
+        await self._save_context()
+
+    async def reset_temp_memory(self):
+        """Reset only temp memory to default values from example files"""
+        # Load default temp memory from example
+        example_temp_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+                                         "..", "docs", "working_dir_example", "agent", "memory", "temp_memory.json")
+        if os.path.exists(example_temp_path):
+            with open(example_temp_path, 'r', encoding='utf-8') as f:
+                self.temp_memory = json.load(f)
+        else:
+            # Fallback to empty list with one test entry if example file not found
+            self.temp_memory = [
+                {
+                    "id": "0test0",
+                    "content": "This is a test temp_memory.",
+                    "role": "Vedal987",
+                    "timestamp": "2024-12-24T00:00:00.000000"
+                }
+            ]
+
+        # Save only temp memory
+        await self._save_temp_memory()
+
+        print("Temp memory has been reset to default values from example files")
+
+    async def get_full_context(self) -> str:
+        """Get all memory as context for LLM"""
+        context_parts = []
+
+        # Add init memory
+        context_parts.append("=== INIT MEMORY (Immutable) ===")
+        for key, value in self.init_memory.items():
+            context_parts.append(f"{key}: {value}")
+
+        # Add core memory
+        context_parts.append("\n=== CORE MEMORY (Long-term, Mutable) ===")
+        if "blocks" in self.core_memory:
+            for block_id, block in self.core_memory["blocks"].items():
+                context_parts.append(f"\nBlock: {block['title']} ({block_id})")
+                context_parts.append(f"Description: {block['description']}")
+                context_parts.append("Content:")
+                for item in block["content"]:
+                    context_parts.append(f"  - {item}")
+
+        # Add context (recent conversation history)
+        context_parts.append("\n=== CONTEXT (Recent Conversation) ===")
+        recent_context = await self.get_recent_context(5)
+        for i, entry in enumerate(recent_context):
+            # Handle entries with and without 'role' field
+            if "role" in entry:
+                role_display = "User" if entry["role"] == "user" else "Assistant"
+                content = entry.get('content', entry.get('final_response', 'Unknown entry'))
+                context_parts.append(f"{i+1}. [{role_display}] {content}")
+            elif "type" in entry and entry["type"] == "llm_interaction":
+                # For detailed LLM interaction entries with role: assistant
+                if entry.get("role") == "assistant":
+                    context_parts.append(f"{i+1}. [Assistant] {entry.get('final_response', 'Processing step')}")
+                else:
+                    # For other llm_interaction entries without role
+                    context_parts.append(f"{i+1}. [System] {entry.get('final_response', 'Processing step')}")
+            else:
+                # Default fallback
+                context_parts.append(f"{i+1}. [System] {entry.get('content', 'Unknown entry')}")
+
+        # Add temp memory (only for temporary state, not dialog history)
+        if self.temp_memory:
+            context_parts.append("\n=== TEMP MEMORY (Processing State) ===")
+            for item in self.temp_memory:
+                context_parts.append(f"[{item.get('role', 'system')}] {item.get('content', '')}")
+
+        return "\n".join(context_parts)
+
+    async def add_temp_memory(self, content: str, role: str = "system"):
+        """Add an item to temp memory (for temporary processing state)"""
+        self.temp_memory.append({
+            "id": generate_id(),
+            "content": content,
+            "role": role,
+            "timestamp": datetime.now().isoformat()
+        })
+
+        # Keep only last 20 temp items
+        if len(self.temp_memory) > 20:
+            self.temp_memory = self.temp_memory[-20:]
+
+        await self._save_temp_memory()
+
+    # Core memory management methods
+    async def get_core_memory_blocks(self) -> Dict[str, Any]:
+        """Get all core memory blocks"""
+        return self.core_memory.get("blocks", {})
+
+    async def get_core_memory_block(self, block_id: str) -> Optional[Dict[str, Any]]:
+        """Get a specific core memory block"""
+        blocks = self.core_memory.get("blocks", {})
+        return blocks.get(block_id)
+
+    async def create_core_memory_block(self, title: str, description: str, content: List[str]):
+        """Create a new core memory block with a generated ID"""
+        block_id = generate_id()
+
+        if "blocks" not in self.core_memory:
+            self.core_memory["blocks"] = {}
+
+        self.core_memory["blocks"][block_id] = {
+            "id": block_id,
+            "title": title,
+            "description": description,
+            "content": content if content else []
+        }
+
+        await self._save_core_memory()
+        return block_id  # Return the generated ID
+
+    async def update_core_memory_block(self, block_id: str, title: str = None, description: str = None, content: List[str] = None):
+        """Update a core memory block"""
+        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
+            raise ValueError(f"Block '{block_id}' not found")
+
+        block = self.core_memory["blocks"][block_id]
+        if title is not None:
+            block["title"] = title
+        if description is not None:
+            block["description"] = description
+        if content is not None:
+            block["content"] = content
+
+        await self._save_core_memory()
+
+    async def delete_core_memory_block(self, block_id: str):
+        """Delete a core memory block"""
+        if "blocks" in self.core_memory and block_id in self.core_memory["blocks"]:
+            del self.core_memory["blocks"][block_id]
+            await self._save_core_memory()
+
+    async def add_to_core_memory_block(self, block_id: str, item: str):
+        """Add an item to a core memory block"""
+        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
+            raise ValueError(f"Block '{block_id}' not found")
+
+        self.core_memory["blocks"][block_id]["content"].append(item)
+        await self._save_core_memory()
+
+    async def remove_from_core_memory_block(self, block_id: str, index: int):
+        """Remove an item from a core memory block by index"""
+        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
+            raise ValueError(f"Block '{block_id}' not found")
+
+        if 0 <= index < len(self.core_memory["blocks"][block_id]["content"]):
+            self.core_memory["blocks"][block_id]["content"].pop(index)
+            await self._save_core_memory()
+        else:
+            raise IndexError(f"Index {index} out of range for block '{block_id}'")
neuro_simulator/agent/memory.py
@@ -0,0 +1,137 @@
+# agent/memory.py
+"""
+Memory management for the Neuro Simulator Agent
+"""
+
+import os
+import json
+import asyncio
+from typing import Dict, List, Any
+from datetime import datetime, timedelta
+
+class MemoryManager:
+    """Manages both immutable and mutable memory for the agent"""
+
+    def __init__(self, memory_dir: str = "agent_memory"):
+        self.memory_dir = memory_dir
+        self.immutable_memory_file = os.path.join(memory_dir, "immutable_memory.json")
+        self.mutable_memory_file = os.path.join(memory_dir, "mutable_memory.json")
+        self.conversation_history_file = os.path.join(memory_dir, "conversation_history.json")
+
+        # In-memory storage for faster access
+        self.immutable_memory: Dict[str, Any] = {}
+        self.mutable_memory: Dict[str, Any] = {}
+        self.conversation_history: List[Dict[str, Any]] = []
+
+        # Create memory directory if it doesn't exist
+        os.makedirs(self.memory_dir, exist_ok=True)
+
+    async def initialize(self):
+        """Load memory from files"""
+        # Load immutable memory
+        if os.path.exists(self.immutable_memory_file):
+            with open(self.immutable_memory_file, 'r') as f:
+                self.immutable_memory = json.load(f)
+        else:
+            # Initialize with default immutable data
+            self.immutable_memory = {
+                "name": "Neuro-Sama",
+                "personality": "Friendly, curious, and entertaining AI VTuber",
+                "capabilities": ["chat", "answer questions", "entertain viewers"]
+            }
+            await self._save_immutable_memory()
+
+        # Load mutable memory
+        if os.path.exists(self.mutable_memory_file):
+            with open(self.mutable_memory_file, 'r') as f:
+                self.mutable_memory = json.load(f)
+        else:
+            # Initialize with default mutable data
+            self.mutable_memory = {
+                "mood": "happy",
+                "current_topic": "streaming",
+                "viewer_count": 0
+            }
+            await self._save_mutable_memory()
+
+        # Load conversation history
+        if os.path.exists(self.conversation_history_file):
+            with open(self.conversation_history_file, 'r') as f:
+                self.conversation_history = json.load(f)
+
+        print("Memory manager initialized")
+
+    async def _save_immutable_memory(self):
+        """Save immutable memory to file"""
+        with open(self.immutable_memory_file, 'w') as f:
+            json.dump(self.immutable_memory, f, indent=2)
+
+    async def _save_mutable_memory(self):
+        """Save mutable memory to file"""
+        with open(self.mutable_memory_file, 'w') as f:
+            json.dump(self.mutable_memory, f, indent=2)
+
+    async def _save_conversation_history(self):
+        """Save conversation history to file"""
+        with open(self.conversation_history_file, 'w') as f:
+            json.dump(self.conversation_history, f, indent=2)
+
+    async def reset(self):
+        """Reset all memory"""
+        self.immutable_memory = {
+            "name": "Neuro-Sama",
+            "personality": "Friendly, curious, and entertaining AI VTuber",
+            "capabilities": ["chat", "answer questions", "entertain viewers"]
+        }
+        self.mutable_memory = {
+            "mood": "happy",
+            "current_topic": "streaming",
+            "viewer_count": 0
+        }
+        self.conversation_history = []
+
+        await self._save_immutable_memory()
+        await self._save_mutable_memory()
+        await self._save_conversation_history()
+
+        print("Memory reset completed")
+
+    async def add_message(self, message: Dict[str, Any]):
+        """Add a message to conversation history"""
+        self.conversation_history.append(message)
+        # Keep only the last 50 messages
+        if len(self.conversation_history) > 50:
+            self.conversation_history = self.conversation_history[-50:]
+        await self._save_conversation_history()
+
+    async def get_context(self, max_messages: int = 10) -> str:
+        """Get context from conversation history"""
+        # Get recent messages
+        recent_messages = self.conversation_history[-max_messages:] if self.conversation_history else []
+
+        # Format context
+        context_parts = []
+
+        # Add immutable memory as context
+        context_parts.append("Character Information:")
+        for key, value in self.immutable_memory.items():
+            context_parts.append(f"- {key}: {value}")
+
+        # Add mutable memory as context
+        context_parts.append("\nCurrent State:")
+        for key, value in self.mutable_memory.items():
+            context_parts.append(f"- {key}: {value}")
+
+        # Add conversation history
+        if recent_messages:
+            context_parts.append("\nRecent Conversation:")
+            for msg in recent_messages:
+                context_parts.append(f"{msg['role']}: {msg['content']}")
+
+        return "\n".join(context_parts)
+
+    async def update_mutable_memory(self, updates: Dict[str, Any]):
+        """Update mutable memory with new values"""
+        self.mutable_memory.update(updates)
+        await self._save_mutable_memory()
+        print(f"Mutable memory updated: {updates}")
neuro_simulator/agent/tools/core.py
@@ -0,0 +1,112 @@
+# agent/tools/core.py
+"""
+Core tools for the Neuro Simulator Agent
+"""
+
+from typing import Dict, List, Any, Optional
+import os
+import sys
+
+class ToolManager:
+    """Manages all tools available to the agent"""
+
+    def __init__(self, memory_manager):
+        self.memory_manager = memory_manager
+        self.tools = {}
+        self._register_tools()
+
+    def _register_tools(self):
+        """Register all available tools"""
+        # Memory management tools
+        self.tools["get_core_memory_blocks"] = self._get_core_memory_blocks
+        self.tools["get_core_memory_block"] = self._get_core_memory_block
+        self.tools["create_core_memory_block"] = self._create_core_memory_block
+        self.tools["update_core_memory_block"] = self._update_core_memory_block
+        self.tools["delete_core_memory_block"] = self._delete_core_memory_block
+        self.tools["add_to_core_memory_block"] = self._add_to_core_memory_block
+        self.tools["remove_from_core_memory_block"] = self._remove_from_core_memory_block
+        self.tools["add_temp_memory"] = self._add_temp_memory
+
+        # Output tool
+        self.tools["speak"] = self._speak
+
+    def get_tool_descriptions(self) -> str:
+        """Get descriptions of all available tools"""
+        descriptions = [
+            "Available tools:",
+            "1. get_core_memory_blocks() - Get all core memory blocks",
+            "2. get_core_memory_block(block_id: string) - Get a specific core memory block",
+            "3. create_core_memory_block(title: string, description: string, content: list) - Create a new core memory block with a generated ID",
+            "4. update_core_memory_block(block_id: string, title: string (optional), description: string (optional), content: list (optional)) - Update a core memory block",
+            "5. delete_core_memory_block(block_id: string) - Delete a core memory block",
+            "6. add_to_core_memory_block(block_id: string, item: string) - Add an item to a core memory block",
+            "7. remove_from_core_memory_block(block_id: string, index: integer) - Remove an item from a core memory block by index",
+            "8. add_temp_memory(content: string, role: string) - Add an item to temporary memory",
+            "9. speak(text: string) - Output text to the user",
+            "",
+            "IMPORTANT INSTRUCTIONS:",
+            "- When you want to speak to the user, ONLY use the speak tool with your response as the text parameter",
+            "- DO NOT use print() or any other wrapper functions around the speak tool",
+            "- Example of correct usage: speak(text='Hello, how can I help you today?')",
+            "- Example of incorrect usage: print(speak(text='Hello, how can I help you today?'))",
+            "- ONLY return ONE tool call per response",
+            "- Format your response as plain text with the tool call, nothing else"
+        ]
+        return "\n".join(descriptions)
+
+    async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Any:
+        """Execute a tool by name with given parameters"""
+        if tool_name not in self.tools:
+            return {"error": f"Tool '{tool_name}' not found"}
+
+        try:
+            result = await self.tools[tool_name](**params)
+            return result
+        except Exception as e:
+            return {"error": f"Error executing tool '{tool_name}': {str(e)}"}
+
+    # Tool implementations
+    async def _get_core_memory_blocks(self) -> Dict[str, Any]:
+        """Get all core memory blocks"""
+        return await self.memory_manager.get_core_memory_blocks()
+
+    async def _get_core_memory_block(self, block_id: str) -> Optional[Dict[str, Any]]:
+        """Get a specific core memory block"""
+        return await self.memory_manager.get_core_memory_block(block_id)
+
+    async def _create_core_memory_block(self, title: str, description: str, content: List[str]) -> str:
+        """Create a new core memory block with a generated ID"""
+        block_id = await self.memory_manager.create_core_memory_block(title, description, content)
+        return f"Created core memory block '{block_id}' with title '{title}'"
+
+    async def _update_core_memory_block(self, block_id: str, title: str = None, description: str = None, content: List[str] = None) -> str:
+        """Update a core memory block"""
+        await self.memory_manager.update_core_memory_block(block_id, title, description, content)
+        return f"Updated core memory block '{block_id}'"
+
+    async def _delete_core_memory_block(self, block_id: str) -> str:
+        """Delete a core memory block"""
+        await self.memory_manager.delete_core_memory_block(block_id)
+        return f"Deleted core memory block '{block_id}'"
+
+    async def _add_to_core_memory_block(self, block_id: str, item: str) -> str:
+        """Add an item to a core memory block"""
+        await self.memory_manager.add_to_core_memory_block(block_id, item)
+        return f"Added item to core memory block '{block_id}'"
+
+    async def _remove_from_core_memory_block(self, block_id: str, index: int) -> str:
+        """Remove an item from a core memory block by index"""
+        await self.memory_manager.remove_from_core_memory_block(block_id, index)
+        return f"Removed item from core memory block '{block_id}' at index {index}"
+
+    async def _add_temp_memory(self, content: str, role: str = "user") -> str:
+        """Add an item to temp memory"""
+        await self.memory_manager.add_temp_memory(content, role)
+        return f"Added item to temp memory with role '{role}'"
+
+    async def _speak(self, text: str) -> str:
+        """Output text - this is how the agent communicates with users"""
+        print(f"Agent says: {text}")
+        # Note: Context is now managed in the process_messages method
+        # This tool only outputs the text, not stores it in memory
+        return text