neuro-simulator 0.1.3-py3-none-any.whl → 0.2.1-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- neuro_simulator/__init__.py +1 -10
- neuro_simulator/agent/__init__.py +1 -8
- neuro_simulator/agent/base.py +43 -0
- neuro_simulator/agent/core.py +105 -398
- neuro_simulator/agent/factory.py +30 -0
- neuro_simulator/agent/llm.py +34 -31
- neuro_simulator/agent/memory/__init__.py +1 -4
- neuro_simulator/agent/memory/manager.py +61 -203
- neuro_simulator/agent/tools/__init__.py +1 -4
- neuro_simulator/agent/tools/core.py +8 -18
- neuro_simulator/api/__init__.py +1 -0
- neuro_simulator/api/agent.py +163 -0
- neuro_simulator/api/stream.py +55 -0
- neuro_simulator/api/system.py +90 -0
- neuro_simulator/cli.py +60 -143
- neuro_simulator/core/__init__.py +1 -0
- neuro_simulator/core/agent_factory.py +52 -0
- neuro_simulator/core/agent_interface.py +91 -0
- neuro_simulator/core/application.py +278 -0
- neuro_simulator/services/__init__.py +1 -0
- neuro_simulator/{chatbot.py → services/audience.py} +24 -24
- neuro_simulator/{audio_synthesis.py → services/audio.py} +18 -15
- neuro_simulator/services/builtin.py +87 -0
- neuro_simulator/services/letta.py +206 -0
- neuro_simulator/{stream_manager.py → services/stream.py} +39 -47
- neuro_simulator/utils/__init__.py +1 -0
- neuro_simulator/utils/logging.py +90 -0
- neuro_simulator/utils/process.py +67 -0
- neuro_simulator/{stream_chat.py → utils/queue.py} +17 -4
- neuro_simulator/utils/state.py +14 -0
- neuro_simulator/{websocket_manager.py → utils/websocket.py} +18 -14
- {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/METADATA +83 -33
- neuro_simulator-0.2.1.dist-info/RECORD +37 -0
- neuro_simulator/agent/api.py +0 -737
- neuro_simulator/agent/memory.py +0 -137
- neuro_simulator/agent/tools.py +0 -69
- neuro_simulator/builtin_agent.py +0 -83
- neuro_simulator/config.yaml.example +0 -157
- neuro_simulator/letta.py +0 -164
- neuro_simulator/log_handler.py +0 -43
- neuro_simulator/main.py +0 -673
- neuro_simulator/media/neuro_start.mp4 +0 -0
- neuro_simulator/process_manager.py +0 -70
- neuro_simulator/shared_state.py +0 -11
- neuro_simulator-0.1.3.dist-info/RECORD +0 -31
- /neuro_simulator/{config.py → core/config.py} +0 -0
- {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/WHEEL +0 -0
- {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/entry_points.txt +0 -0
- {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/top_level.txt +0 -0
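The headline change for downstream code is the move from flat modules to subpackages (api, core, services, utils). A minimal migration sketch implied by the renames above; the module paths come from the file list, while the 0.1.3 import form and any symbols other than config_manager (which appears in the llm.py diff below) are assumptions:

# Import paths before and after the 0.2.1 reorganization (sketch).
# 0.1.3 (flat layout) - assumed usage:
from neuro_simulator import chatbot, audio_synthesis, stream_manager

# 0.2.1 (subpackage layout), per the renames listed above:
from neuro_simulator.services import audience, audio, stream
from neuro_simulator.core.config import config_manager  # config.py -> core/config.py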
neuro_simulator/agent/llm.py
CHANGED
@@ -1,30 +1,37 @@
-# agent/llm.py
+# neuro_simulator/agent/llm.py
 """
-LLM client for the Neuro Simulator
+LLM client for the Neuro Simulator's built-in agent.
 """
 
+import asyncio
+import logging
+from pathlib import Path
 from typing import Optional
-import os
-import sys
-
-# Add project root to path
-sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
 
 from google import genai
 from google.genai import types
 from openai import AsyncOpenAI
-
+
+from ..core.config import config_manager
+
+# Use a logger with a shortened, more readable name
+logger = logging.getLogger(__name__.replace("neuro_simulator", "agent", 1))
 
 class LLMClient:
-    """A completely independent LLM client for the built-in agent."""
+    """A completely independent LLM client for the built-in agent, now with lazy initialization."""
 
     def __init__(self):
         self.client = None
         self.model_name = None
-        self.
-
-
-
+        self._generate_func = None
+        self._initialized = False
+
+    async def _ensure_initialized(self):
+        """Initializes the client on first use."""
+        if self._initialized:
+            return
+
+        logger.info("First use of built-in agent's LLMClient, performing initialization...")
         settings = config_manager.settings
         provider = settings.agent.agent_provider.lower()
 
@@ -33,7 +40,6 @@ class LLMClient:
         if not api_key:
             raise ValueError("GEMINI_API_KEY is not set in configuration for the agent.")
 
-        # Use the new client-based API as per the latest documentation
        self.client = genai.Client(api_key=api_key)
         self.model_name = settings.agent.agent_model
         self._generate_func = self._generate_gemini
@@ -52,53 +58,50 @@ class LLMClient:
         else:
             raise ValueError(f"Unsupported agent provider in config: {settings.agent.agent_provider}")
 
-
+        self._initialized = True
+        logger.info(f"Agent LLM client initialized. Provider: {provider.upper()}, Model: {self.model_name}")
 
     async def _generate_gemini(self, prompt: str, max_tokens: int) -> str:
-        """Generates text using the Gemini model
-        import asyncio
-
+        """Generates text using the Gemini model."""
         generation_config = types.GenerateContentConfig(
             max_output_tokens=max_tokens,
-            # temperature can be added later if needed from config
         )
-
         try:
-            # The new client's generate_content is synchronous, run it in a thread
             response = await asyncio.to_thread(
                 self.client.models.generate_content,
                 model=self.model_name,
                 contents=prompt,
                 config=generation_config
             )
-            return response.text if response and response
+            return response.text if response and hasattr(response, 'text') else ""
         except Exception as e:
-
+            logger.error(f"Error in _generate_gemini: {e}", exc_info=True)
             return ""
 
     async def _generate_openai(self, prompt: str, max_tokens: int) -> str:
+        """Generates text using the OpenAI model."""
         try:
             response = await self.client.chat.completions.create(
                 model=self.model_name,
                 messages=[{"role": "user", "content": prompt}],
                 max_tokens=max_tokens,
-                # temperature can be added to config if needed
             )
             if response.choices and response.choices[0].message and response.choices[0].message.content:
                 return response.choices[0].message.content.strip()
             return ""
         except Exception as e:
-
+            logger.error(f"Error in _generate_openai: {e}", exc_info=True)
             return ""
 
     async def generate(self, prompt: str, max_tokens: int = 1000) -> str:
-        """Generate text using the configured LLM."""
-
-
+        """Generate text using the configured LLM, ensuring client is initialized."""
+        await self._ensure_initialized()
+
+        if not self.client or not self._generate_func:
+            raise RuntimeError("LLM Client could not be initialized.")
         try:
             result = await self._generate_func(prompt, max_tokens)
-            # Ensure we always return a string, even if the result is None
             return result if result is not None else ""
         except Exception as e:
-
-            return "My brain is not working, tell Vedal to check the logs."
+            logger.error(f"Error generating text with Agent LLM: {e}", exc_info=True)
+            return "My brain is not working, tell Vedal to check the logs."
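The net effect of this diff is that constructing LLMClient is now side-effect free: provider selection and client construction are deferred to the first generate() call via _ensure_initialized(). A minimal usage sketch; LLMClient and generate() are taken from the diff above, while the asyncio scaffolding is illustrative:

import asyncio

from neuro_simulator.agent.llm import LLMClient

async def main():
    client = LLMClient()  # cheap: no config read, no provider client built yet
    # First call runs _ensure_initialized(), picking Gemini or OpenAI from
    # settings.agent.agent_provider, then dispatches via _generate_func.
    reply = await client.generate("Say hi to chat!", max_tokens=64)
    print(reply)  # empty string on provider errors; apology string on dispatch errors

asyncio.run(main())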
neuro_simulator/agent/memory/manager.py
CHANGED
@@ -1,97 +1,64 @@
-# agent/memory/manager.py
+# neuro_simulator/agent/memory/manager.py
 """
-Advanced memory management for the Neuro Simulator Agent
+Advanced memory management for the Neuro Simulator Agent.
 """
 
-import os
-import json
 import asyncio
-
-
+import json
+import logging
+import os
 import random
 import string
-import
+from datetime import datetime
+from typing import Any, Dict, List, Optional
 
+# Use the existing agent logger for consistent logging
+logger = logging.getLogger("neuro_agent")
 
 def generate_id(length=6) -> str:
-    """Generate a random ID string"""
+    """Generate a random ID string."""
     return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
 
-
 class MemoryManager:
-    """Manages different types of memory for the agent"""
+    """Manages different types of memory for the agent."""
 
     def __init__(self, working_dir: str = None):
-        # Use provided working directory or default to current directory
         if working_dir is None:
            working_dir = os.getcwd()
 
         self.memory_dir = os.path.join(working_dir, "agent", "memory")
         os.makedirs(self.memory_dir, exist_ok=True)
 
-        # Memory file paths
         self.init_memory_file = os.path.join(self.memory_dir, "init_memory.json")
         self.core_memory_file = os.path.join(self.memory_dir, "core_memory.json")
-        self.context_file = os.path.join(self.memory_dir, "context.json")
+        self.context_file = os.path.join(self.memory_dir, "context.json")
         self.temp_memory_file = os.path.join(self.memory_dir, "temp_memory.json")
 
-        # In-memory storage
         self.init_memory: Dict[str, Any] = {}
         self.core_memory: Dict[str, Any] = {}
-        self.context_history: List[Dict[str, Any]] = []
-        self.temp_memory: List[Dict[str, Any]] = []
+        self.context_history: List[Dict[str, Any]] = []
+        self.temp_memory: List[Dict[str, Any]] = []
 
     async def initialize(self):
-        """Load all memory types from files"""
-        # Load init memory
+        """Load all memory types from files."""
+        # Load init memory
         if os.path.exists(self.init_memory_file):
             with open(self.init_memory_file, 'r', encoding='utf-8') as f:
                 self.init_memory = json.load(f)
         else:
-            # Default init memory - this is just an example, users can customize
             self.init_memory = {
-                "name": "Neuro-Sama",
-                "role": "AI VTuber",
+                "name": "Neuro-Sama", "role": "AI VTuber",
                 "personality": "Friendly, curious, and entertaining",
-                "capabilities": [
-                    "Chat with viewers",
-                    "Answer questions",
-                    "Entertain audience",
-                    "Express opinions"
-                ]
+                "capabilities": ["Chat with viewers", "Answer questions"]
             }
             await self._save_init_memory()
 
-        # Load core memory
+        # Load core memory
         if os.path.exists(self.core_memory_file):
             with open(self.core_memory_file, 'r', encoding='utf-8') as f:
                 self.core_memory = json.load(f)
         else:
-
-            self.core_memory = {
-                "blocks": {
-                    "general_knowledge": {
-                        "id": "general_knowledge",
-                        "title": "General Knowledge",
-                        "description": "Basic facts and knowledge about the world",
-                        "content": [
-                            "The earth is round",
-                            "Water boils at 100°C at sea level",
-                            "Humans need oxygen to survive"
-                        ]
-                    },
-                    "stream_info": {
-                        "id": "stream_info",
-                        "title": "Stream Information",
-                        "description": "Information about this stream and Neuro-Sama",
-                        "content": [
-                            "This is a simulation of Neuro-Sama, an AI VTuber",
-                            "The stream is meant for entertainment and experimentation",
-                            "Viewers can interact with Neuro-Sama through chat"
-                        ]
-                    }
-                }
-            }
+            self.core_memory = {"blocks": {}}
             await self._save_core_memory()
 
         # Load context history
@@ -101,246 +68,137 @@ class MemoryManager:
         else:
             self.context_history = []
 
-        # Load temp memory
+        # Load temp memory
         if os.path.exists(self.temp_memory_file):
             with open(self.temp_memory_file, 'r', encoding='utf-8') as f:
                 self.temp_memory = json.load(f)
+        else:
+            self.temp_memory = []
 
-
+        logger.info("Agent memory manager initialized.")
 
     async def _save_init_memory(self):
-        """Save init memory to file"""
         with open(self.init_memory_file, 'w', encoding='utf-8') as f:
             json.dump(self.init_memory, f, ensure_ascii=False, indent=2)
 
     async def update_init_memory(self, new_memory: Dict[str, Any]):
-        """Update init memory with new values"""
         self.init_memory.update(new_memory)
         await self._save_init_memory()
 
     async def _save_core_memory(self):
-        """Save core memory to file"""
         with open(self.core_memory_file, 'w', encoding='utf-8') as f:
             json.dump(self.core_memory, f, ensure_ascii=False, indent=2)
 
     async def _save_context(self):
-        """Save context to file"""
        with open(self.context_file, 'w', encoding='utf-8') as f:
             json.dump(self.context_history, f, ensure_ascii=False, indent=2)
 
     async def _save_temp_memory(self):
-        """Save temp memory to file"""
         with open(self.temp_memory_file, 'w', encoding='utf-8') as f:
             json.dump(self.temp_memory, f, ensure_ascii=False, indent=2)
 
     async def add_context_entry(self, role: str, content: str):
-        """
-        entry = {
-            "id": generate_id(),
-            "role": role, # "user" or "assistant"
-            "content": content,
-            "timestamp": datetime.now().isoformat()
-        }
+        entry = {"id": generate_id(), "role": role, "content": content, "timestamp": datetime.now().isoformat()}
         self.context_history.append(entry)
-
         await self._save_context()
 
     async def add_detailed_context_entry(self, input_messages: List[Dict[str, str]],
                                          prompt: str, llm_response: str,
                                          tool_executions: List[Dict[str, Any]],
-                                         final_response: str,
-
-
-
+                                         final_response: str, entry_id: str = None):
+        update_data = {
+            "input_messages": input_messages, "prompt": prompt, "llm_response": llm_response,
+            "tool_executions": tool_executions, "final_response": final_response,
+            "timestamp": datetime.now().isoformat()
+        }
         if entry_id:
-            # Find the entry with the given ID and update it
             for entry in self.context_history:
                 if entry.get("id") == entry_id:
-                    entry.update(
-                        "input_messages": input_messages,
-                        "prompt": prompt,
-                        "llm_response": llm_response,
-                        "tool_executions": tool_executions,
-                        "final_response": final_response,
-                        "timestamp": datetime.now().isoformat()
-                    })
+                    entry.update(update_data)
                     await self._save_context()
                     return entry_id
 
-
-
-            "id": entry_id or generate_id(),
-            "type": "llm_interaction",
-            "role": "assistant", # Add role for llm_interaction entries
-            "input_messages": input_messages,
-            "prompt": prompt,
-            "llm_response": llm_response,
-            "tool_executions": tool_executions,
-            "final_response": final_response,
-            "timestamp": datetime.now().isoformat()
-        }
-        self.context_history.append(entry)
-
+        new_entry = {"id": entry_id or generate_id(), "type": "llm_interaction", "role": "assistant", **update_data}
+        self.context_history.append(new_entry)
         await self._save_context()
-        return
+        return new_entry["id"]
 
     async def get_recent_context(self, entries: int = 10) -> List[Dict[str, Any]]:
-
-        return self.context_history[-entries:] if self.context_history else []
+        return self.context_history[-entries:]
 
     async def get_detailed_context_history(self) -> List[Dict[str, Any]]:
-        """Get the full detailed context history"""
         return self.context_history
 
     async def get_last_agent_response(self) -> Optional[str]:
-        """Get the last response from the agent"""
         for entry in reversed(self.context_history):
-            if entry.get("
-
-
+            if entry.get("type") == "llm_interaction":
+                final_response = entry.get("final_response", "")
+                if final_response and final_response not in ["Processing started", "Prompt sent to LLM", "LLM response received"]:
+                    return final_response
+            elif entry.get("role") == "assistant":
+                content = entry.get("content", "")
                 if content and content != "Processing started":
                     return content
-            elif entry.get("type") == "llm_interaction":
-                final_response = entry.get("final_response")
-                # Skip placeholder responses
-                if final_response and final_response != "Processing started" and final_response != "Prompt sent to LLM" and final_response != "LLM response received":
-                    return final_response
         return None
 
     async def reset_context(self):
-        """Reset context"""
         self.context_history = []
         await self._save_context()
 
     async def reset_temp_memory(self):
-        """Reset
-
-        example_temp_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
-                                         "..", "docs", "working_dir_example", "agent", "memory", "temp_memory.json")
-        if os.path.exists(example_temp_path):
-            with open(example_temp_path, 'r', encoding='utf-8') as f:
-                self.temp_memory = json.load(f)
-        else:
-            # Fallback to empty list with one test entry if example file not found
-            self.temp_memory = [
-                {
-                    "id": "0test0",
-                    "content": "This is a test temp_memory.",
-                    "role": "Vedal987",
-                    "timestamp": "2024-12-24T00:00:00.000000"
-                }
-            ]
-
-        # Save only temp memory
+        """Reset temp memory to a default empty state."""
+        self.temp_memory = []
         await self._save_temp_memory()
-
-        print("Temp memory has been reset to default values from example files")
+        logger.info("Agent temp memory has been reset.")
 
     async def get_full_context(self) -> str:
-        "
-        context_parts = []
-
-        # Add init memory
-        context_parts.append("=== INIT MEMORY (Immutable) ===")
-        for key, value in self.init_memory.items():
-            context_parts.append(f"{key}: {value}")
-
-        # Add core memory
+        context_parts = ["=== INIT MEMORY (Immutable) ===", json.dumps(self.init_memory, indent=2)]
         context_parts.append("\n=== CORE MEMORY (Long-term, Mutable) ===")
         if "blocks" in self.core_memory:
             for block_id, block in self.core_memory["blocks"].items():
-                context_parts.append(f"\nBlock: {block
-                context_parts.append(f"Description: {block
+                context_parts.append(f"\nBlock: {block.get('title', '')} ({block_id})")
+                context_parts.append(f"Description: {block.get('description', '')}")
                 context_parts.append("Content:")
-                for item in block
+                for item in block.get("content", []):
                     context_parts.append(f"  - {item}")
-
-        # Add temp memory (only for temporary state, not dialog history)
         if self.temp_memory:
             context_parts.append("\n=== TEMP MEMORY (Processing State) ===")
             for item in self.temp_memory:
                 context_parts.append(f"[{item.get('role', 'system')}] {item.get('content', '')}")
-
         return "\n".join(context_parts)
 
     async def add_temp_memory(self, content: str, role: str = "system"):
-        """
-        self.temp_memory.append({
-            "id": generate_id(),
-            "content": content,
-            "role": role,
-            "timestamp": datetime.now().isoformat()
-        })
-
-        # Keep only last 20 temp items
+        self.temp_memory.append({"id": generate_id(), "content": content, "role": role, "timestamp": datetime.now().isoformat()})
         if len(self.temp_memory) > 20:
             self.temp_memory = self.temp_memory[-20:]
-
         await self._save_temp_memory()
 
-    # Core memory management methods
     async def get_core_memory_blocks(self) -> Dict[str, Any]:
-        """Get all core memory blocks"""
         return self.core_memory.get("blocks", {})
 
     async def get_core_memory_block(self, block_id: str) -> Optional[Dict[str, Any]]:
-        ""
-        blocks = self.core_memory.get("blocks", {})
-        return blocks.get(block_id)
+        return self.core_memory.get("blocks", {}).get(block_id)
 
-    async def create_core_memory_block(self, title: str, description: str, content: List[str]):
-        """Create a new core memory block with a generated ID"""
+    async def create_core_memory_block(self, title: str, description: str, content: List[str]) -> str:
        block_id = generate_id()
-
         if "blocks" not in self.core_memory:
             self.core_memory["blocks"] = {}
-
         self.core_memory["blocks"][block_id] = {
-            "id": block_id,
-            "title": title,
-            "description": description,
-            "content": content if content else []
+            "id": block_id, "title": title, "description": description, "content": content or []
         }
-
         await self._save_core_memory()
-        return block_id
+        return block_id
 
-    async def update_core_memory_block(self, block_id: str, title: str = None, description: str = None, content: List[str] = None):
-
-        if
+    async def update_core_memory_block(self, block_id: str, title: Optional[str] = None, description: Optional[str] = None, content: Optional[List[str]] = None):
+        block = self.core_memory.get("blocks", {}).get(block_id)
+        if not block:
            raise ValueError(f"Block '{block_id}' not found")
-
-
-        if
-            block["title"] = title
-        if description is not None:
-            block["description"] = description
-        if content is not None:
-            block["content"] = content
-
+        if title is not None: block["title"] = title
+        if description is not None: block["description"] = description
+        if content is not None: block["content"] = content
         await self._save_core_memory()
 
     async def delete_core_memory_block(self, block_id: str):
-        """Delete a core memory block"""
         if "blocks" in self.core_memory and block_id in self.core_memory["blocks"]:
             del self.core_memory["blocks"][block_id]
-            await self._save_core_memory()
-
-    async def add_to_core_memory_block(self, block_id: str, item: str):
-        """Add an item to a core memory block"""
-        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
-            raise ValueError(f"Block '{block_id}' not found")
-
-        self.core_memory["blocks"][block_id]["content"].append(item)
-        await self._save_core_memory()
-
-    async def remove_from_core_memory_block(self, block_id: str, index: int):
-        """Remove an item from a core memory block by index"""
-        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
-            raise ValueError(f"Block '{block_id}' not found")
-
-        if 0 <= index < len(self.core_memory["blocks"][block_id]["content"]):
-            self.core_memory["blocks"][block_id]["content"].pop(index)
-            await self._save_core_memory()
-        else:
-            raise IndexError(f"Index {index} out of range for block '{block_id}'")
+            await self._save_core_memory()
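Taken together, the slimmed-down MemoryManager keeps four JSON-backed stores (init, core, context, temp) under the working directory's agent/memory/ folder. A short sketch of the core-memory lifecycle using only methods visible in this diff; the working directory and block contents are illustrative:

import asyncio

from neuro_simulator.agent.memory.manager import MemoryManager

async def demo():
    mm = MemoryManager(working_dir="/tmp/neuro-demo")  # illustrative path
    await mm.initialize()  # loads or seeds the four JSON files

    block_id = await mm.create_core_memory_block(
        title="Stream Information",
        description="Facts about the current stream",
        content=["Viewers interact through chat"],
    )
    await mm.update_core_memory_block(block_id, content=["Keep replies short"])

    await mm.add_temp_memory("Tone: playful", role="system")  # capped at 20 entries
    print(await mm.get_full_context())  # rendered prompt context for the LLM

asyncio.run(demo())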
neuro_simulator/agent/tools/core.py
CHANGED
@@ -1,11 +1,14 @@
-# agent/tools/core.py
+# neuro_simulator/agent/tools/core.py
 """
 Core tools for the Neuro Simulator Agent
 """
 
+import logging
+from pathlib import Path
 from typing import Dict, List, Any, Optional
-
-
+
+# Use a logger with a shortened, more readable name
+logger = logging.getLogger(__name__.replace("neuro_simulator", "agent", 1))
 
 class ToolManager:
     """Manages all tools available to the agent"""
@@ -17,7 +20,6 @@ class ToolManager:
 
     def _register_tools(self):
         """Register all available tools"""
-        # Memory management tools
         self.tools["get_core_memory_blocks"] = self._get_core_memory_blocks
         self.tools["get_core_memory_block"] = self._get_core_memory_block
         self.tools["create_core_memory_block"] = self._create_core_memory_block
@@ -26,8 +28,6 @@ class ToolManager:
         self.tools["add_to_core_memory_block"] = self._add_to_core_memory_block
         self.tools["remove_from_core_memory_block"] = self._remove_from_core_memory_block
         self.tools["add_temp_memory"] = self._add_temp_memory
-
-        # Output tool
         self.tools["speak"] = self._speak
 
     def get_tool_descriptions(self) -> str:
@@ -67,46 +67,36 @@ class ToolManager:
 
     # Tool implementations
     async def _get_core_memory_blocks(self) -> Dict[str, Any]:
-        """Get all core memory blocks"""
         return await self.memory_manager.get_core_memory_blocks()
 
     async def _get_core_memory_block(self, block_id: str) -> Optional[Dict[str, Any]]:
-        """Get a specific core memory block"""
         return await self.memory_manager.get_core_memory_block(block_id)
 
     async def _create_core_memory_block(self, title: str, description: str, content: List[str]) -> str:
-        """Create a new core memory block with a generated ID"""
         block_id = await self.memory_manager.create_core_memory_block(title, description, content)
         return f"Created core memory block '{block_id}' with title '{title}'"
 
     async def _update_core_memory_block(self, block_id: str, title: str = None, description: str = None, content: List[str] = None) -> str:
-        """Update a core memory block"""
         await self.memory_manager.update_core_memory_block(block_id, title, description, content)
         return f"Updated core memory block '{block_id}'"
 
     async def _delete_core_memory_block(self, block_id: str) -> str:
-        """Delete a core memory block"""
         await self.memory_manager.delete_core_memory_block(block_id)
         return f"Deleted core memory block '{block_id}'"
 
     async def _add_to_core_memory_block(self, block_id: str, item: str) -> str:
-        """Add an item to a core memory block"""
         await self.memory_manager.add_to_core_memory_block(block_id, item)
         return f"Added item to core memory block '{block_id}'"
 
     async def _remove_from_core_memory_block(self, block_id: str, index: int) -> str:
-        """Remove an item from a core memory block by index"""
         await self.memory_manager.remove_from_core_memory_block(block_id, index)
         return f"Removed item from core memory block '{block_id}' at index {index}"
 
     async def _add_temp_memory(self, content: str, role: str = "user") -> str:
-        """Add an item to temp memory"""
         await self.memory_manager.add_temp_memory(content, role)
         return f"Added item to temp memory with role '{role}'"
 
     async def _speak(self, text: str) -> str:
         """Output text - this is how the agent communicates with users"""
-
-
-        # This tool only outputs the text, not stores it in memory
-        return text
+        logger.info(f"Agent says: {text}")
+        return text
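Because _register_tools() maps tool names to bound coroutines in self.tools, dispatching a tool call reduces to a dictionary lookup plus an await. A sketch, assuming tool_manager is an initialized ToolManager (its constructor sits outside the hunks shown):

async def run_tool(tool_manager, name: str, **kwargs) -> str:
    # Unknown tool names raise KeyError; arguments pass through unchanged.
    tool = tool_manager.tools[name]
    return await tool(**kwargs)

# e.g. await run_tool(tool_manager, "speak", text="Hello chat!")
# e.g. await run_tool(tool_manager, "add_temp_memory", content="hi", role="user")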
neuro_simulator/api/__init__.py
ADDED
@@ -0,0 +1 @@
+# This file makes the 'api' directory a Python package.