neuro-simulator 0.1.2-py3-none-any.whl → 0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. neuro_simulator/__init__.py +1 -10
  2. neuro_simulator/agent/__init__.py +1 -8
  3. neuro_simulator/agent/base.py +43 -0
  4. neuro_simulator/agent/core.py +111 -374
  5. neuro_simulator/agent/factory.py +30 -0
  6. neuro_simulator/agent/llm.py +34 -31
  7. neuro_simulator/agent/memory/__init__.py +1 -4
  8. neuro_simulator/agent/memory/manager.py +64 -230
  9. neuro_simulator/agent/tools/__init__.py +1 -4
  10. neuro_simulator/agent/tools/core.py +8 -18
  11. neuro_simulator/api/__init__.py +1 -0
  12. neuro_simulator/api/agent.py +163 -0
  13. neuro_simulator/api/stream.py +55 -0
  14. neuro_simulator/api/system.py +90 -0
  15. neuro_simulator/cli.py +53 -142
  16. neuro_simulator/core/__init__.py +1 -0
  17. neuro_simulator/core/agent_factory.py +52 -0
  18. neuro_simulator/core/agent_interface.py +91 -0
  19. neuro_simulator/core/application.py +278 -0
  20. neuro_simulator/services/__init__.py +1 -0
  21. neuro_simulator/{chatbot.py → services/audience.py} +24 -24
  22. neuro_simulator/{audio_synthesis.py → services/audio.py} +18 -15
  23. neuro_simulator/services/builtin.py +87 -0
  24. neuro_simulator/services/letta.py +206 -0
  25. neuro_simulator/{stream_manager.py → services/stream.py} +39 -47
  26. neuro_simulator/utils/__init__.py +1 -0
  27. neuro_simulator/utils/logging.py +90 -0
  28. neuro_simulator/utils/process.py +67 -0
  29. neuro_simulator/{stream_chat.py → utils/queue.py} +17 -4
  30. neuro_simulator/utils/state.py +14 -0
  31. neuro_simulator/{websocket_manager.py → utils/websocket.py} +18 -14
  32. {neuro_simulator-0.1.2.dist-info → neuro_simulator-0.2.0.dist-info}/METADATA +176 -176
  33. neuro_simulator-0.2.0.dist-info/RECORD +37 -0
  34. neuro_simulator/agent/api.py +0 -737
  35. neuro_simulator/agent/memory.py +0 -137
  36. neuro_simulator/agent/tools.py +0 -69
  37. neuro_simulator/builtin_agent.py +0 -83
  38. neuro_simulator/config.yaml.example +0 -157
  39. neuro_simulator/letta.py +0 -164
  40. neuro_simulator/log_handler.py +0 -43
  41. neuro_simulator/main.py +0 -673
  42. neuro_simulator/media/neuro_start.mp4 +0 -0
  43. neuro_simulator/process_manager.py +0 -70
  44. neuro_simulator/shared_state.py +0 -11
  45. neuro_simulator-0.1.2.dist-info/RECORD +0 -31
  46. neuro_simulator/{config.py → core/config.py} +0 -0
  47. {neuro_simulator-0.1.2.dist-info → neuro_simulator-0.2.0.dist-info}/WHEEL +0 -0
  48. {neuro_simulator-0.1.2.dist-info → neuro_simulator-0.2.0.dist-info}/entry_points.txt +0 -0
  49. {neuro_simulator-0.1.2.dist-info → neuro_simulator-0.2.0.dist-info}/top_level.txt +0 -0
neuro_simulator/agent/factory.py (new file)
@@ -0,0 +1,30 @@
+# agent/factory.py
+"""Factory for creating agent instances"""
+
+from .base import BaseAgent
+from ..config import config_manager
+
+
+async def create_agent() -> BaseAgent:
+    """Create an agent instance based on the configuration"""
+    agent_type = config_manager.settings.agent_type
+
+    if agent_type == "builtin":
+        from ..builtin_agent import local_agent, BuiltinAgentWrapper, initialize_builtin_agent
+        if local_agent is None:
+            # Try to initialize the builtin agent
+            await initialize_builtin_agent()
+            # Re-import local_agent after initialization
+            from ..builtin_agent import local_agent
+        if local_agent is None:
+            raise RuntimeError("Failed to initialize Builtin agent")
+        return BuiltinAgentWrapper(local_agent)
+    elif agent_type == "letta":
+        from ..letta import get_letta_agent, initialize_letta_client
+        # Try to initialize the letta client
+        initialize_letta_client()
+        agent = get_letta_agent()
+        await agent.initialize()
+        return agent
+    else:
+        raise ValueError(f"Unknown agent type: {agent_type}")
neuro_simulator/agent/llm.py
@@ -1,30 +1,37 @@
-# agent/llm.py
+# neuro_simulator/agent/llm.py
 """
-LLM client for the Neuro Simulator Agent
+LLM client for the Neuro Simulator's built-in agent.
 """
 
+import asyncio
+import logging
+from pathlib import Path
 from typing import Optional
-import os
-import sys
-
-# Add project root to path
-sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
 
 from google import genai
 from google.genai import types
 from openai import AsyncOpenAI
-from ..config import config_manager
+
+from ..core.config import config_manager
+
+# Use a logger with a shortened, more readable name
+logger = logging.getLogger(__name__.replace("neuro_simulator", "agent", 1))
 
 class LLMClient:
-    """A completely independent LLM client for the built-in agent."""
+    """A completely independent LLM client for the built-in agent, now with lazy initialization."""
 
     def __init__(self):
         self.client = None
         self.model_name = None
-        self._initialize_client()
-
-    def _initialize_client(self):
-        """Initializes the LLM client based on the 'agent' section of the config."""
+        self._generate_func = None
+        self._initialized = False
+
+    async def _ensure_initialized(self):
+        """Initializes the client on first use."""
+        if self._initialized:
+            return
+
+        logger.info("First use of built-in agent's LLMClient, performing initialization...")
         settings = config_manager.settings
         provider = settings.agent.agent_provider.lower()
 
@@ -33,7 +40,6 @@ class LLMClient:
         if not api_key:
             raise ValueError("GEMINI_API_KEY is not set in configuration for the agent.")
 
-        # Use the new client-based API as per the latest documentation
         self.client = genai.Client(api_key=api_key)
         self.model_name = settings.agent.agent_model
         self._generate_func = self._generate_gemini
@@ -52,53 +58,50 @@ class LLMClient:
         else:
             raise ValueError(f"Unsupported agent provider in config: {settings.agent.agent_provider}")
 
-        print(f"Agent LLM client initialized. Provider: {provider.upper()}, Model: {self.model_name}")
+        self._initialized = True
+        logger.info(f"Agent LLM client initialized. Provider: {provider.upper()}, Model: {self.model_name}")
 
     async def _generate_gemini(self, prompt: str, max_tokens: int) -> str:
-        """Generates text using the Gemini model with the new SDK."""
-        import asyncio
-
+        """Generates text using the Gemini model."""
         generation_config = types.GenerateContentConfig(
             max_output_tokens=max_tokens,
-            # temperature can be added later if needed from config
         )
-
         try:
-            # The new client's generate_content is synchronous, run it in a thread
             response = await asyncio.to_thread(
                 self.client.models.generate_content,
                 model=self.model_name,
                 contents=prompt,
                 config=generation_config
             )
-            return response.text if response and response.text else ""
+            return response.text if response and hasattr(response, 'text') else ""
         except Exception as e:
-            print(f"Error in _generate_gemini: {e}")
+            logger.error(f"Error in _generate_gemini: {e}", exc_info=True)
             return ""
 
     async def _generate_openai(self, prompt: str, max_tokens: int) -> str:
+        """Generates text using the OpenAI model."""
         try:
             response = await self.client.chat.completions.create(
                 model=self.model_name,
                 messages=[{"role": "user", "content": prompt}],
                 max_tokens=max_tokens,
-                # temperature can be added to config if needed
             )
             if response.choices and response.choices[0].message and response.choices[0].message.content:
                 return response.choices[0].message.content.strip()
             return ""
         except Exception as e:
-            print(f"Error in _generate_openai: {e}")
+            logger.error(f"Error in _generate_openai: {e}", exc_info=True)
             return ""
 
     async def generate(self, prompt: str, max_tokens: int = 1000) -> str:
-        """Generate text using the configured LLM."""
-        if not self.client:
-            raise RuntimeError("LLM Client is not initialized.")
+        """Generate text using the configured LLM, ensuring client is initialized."""
+        await self._ensure_initialized()
+
+        if not self.client or not self._generate_func:
+            raise RuntimeError("LLM Client could not be initialized.")
         try:
             result = await self._generate_func(prompt, max_tokens)
-            # Ensure we always return a string, even if the result is None
             return result if result is not None else ""
         except Exception as e:
-            print(f"Error generating text with Agent LLM: {e}")
-            return "My brain is not working, tell Vedal to check the logs."
+            logger.error(f"Error generating text with Agent LLM: {e}", exc_info=True)
+            return "My brain is not working, tell Vedal to check the logs."
neuro_simulator/agent/memory/__init__.py
@@ -1,4 +1 @@
-# agent/memory/__init__.py
-"""
-Memory module for the Neuro Simulator Agent
-"""
+# neuro_simulator.agent.memory package
neuro_simulator/agent/memory/manager.py
@@ -1,97 +1,64 @@
-# agent/memory/manager.py
+# neuro_simulator/agent/memory/manager.py
 """
-Advanced memory management for the Neuro Simulator Agent
+Advanced memory management for the Neuro Simulator Agent.
 """
 
-import os
-import json
 import asyncio
-from typing import Dict, List, Any, Optional
-from datetime import datetime
+import json
+import logging
+import os
 import random
 import string
-import sys
+from datetime import datetime
+from typing import Any, Dict, List, Optional
 
+# Use the existing agent logger for consistent logging
+logger = logging.getLogger("neuro_agent")
 
 def generate_id(length=6) -> str:
-    """Generate a random ID string"""
+    """Generate a random ID string."""
     return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
 
-
 class MemoryManager:
-    """Manages different types of memory for the agent"""
+    """Manages different types of memory for the agent."""
 
     def __init__(self, working_dir: str = None):
-        # Use provided working directory or default to current directory
         if working_dir is None:
             working_dir = os.getcwd()
 
         self.memory_dir = os.path.join(working_dir, "agent", "memory")
         os.makedirs(self.memory_dir, exist_ok=True)
 
-        # Memory file paths
         self.init_memory_file = os.path.join(self.memory_dir, "init_memory.json")
         self.core_memory_file = os.path.join(self.memory_dir, "core_memory.json")
-        self.context_file = os.path.join(self.memory_dir, "context.json")  # new context file
+        self.context_file = os.path.join(self.memory_dir, "context.json")
        self.temp_memory_file = os.path.join(self.memory_dir, "temp_memory.json")
 
-        # In-memory storage
         self.init_memory: Dict[str, Any] = {}
         self.core_memory: Dict[str, Any] = {}
-        self.context_history: List[Dict[str, Any]] = []  # new context history
-        self.temp_memory: List[Dict[str, Any]] = []  # truly temporary memory
+        self.context_history: List[Dict[str, Any]] = []
+        self.temp_memory: List[Dict[str, Any]] = []
 
     async def initialize(self):
-        """Load all memory types from files"""
-        # Load init memory (immutable by agent)
+        """Load all memory types from files."""
+        # Load init memory
         if os.path.exists(self.init_memory_file):
             with open(self.init_memory_file, 'r', encoding='utf-8') as f:
                 self.init_memory = json.load(f)
         else:
-            # Default init memory - this is just an example, users can customize
             self.init_memory = {
-                "name": "Neuro-Sama",
-                "role": "AI VTuber",
+                "name": "Neuro-Sama", "role": "AI VTuber",
                 "personality": "Friendly, curious, and entertaining",
-                "capabilities": [
-                    "Chat with viewers",
-                    "Answer questions",
-                    "Entertain audience",
-                    "Express opinions"
-                ]
+                "capabilities": ["Chat with viewers", "Answer questions"]
             }
             await self._save_init_memory()
 
-        # Load core memory (mutable by both agent and user)
+        # Load core memory
         if os.path.exists(self.core_memory_file):
             with open(self.core_memory_file, 'r', encoding='utf-8') as f:
                 self.core_memory = json.load(f)
         else:
-            # Default core memory with blocks
-            self.core_memory = {
-                "blocks": {
-                    "general_knowledge": {
-                        "id": "general_knowledge",
-                        "title": "General Knowledge",
-                        "description": "Basic facts and knowledge about the world",
-                        "content": [
-                            "The earth is round",
-                            "Water boils at 100°C at sea level",
-                            "Humans need oxygen to survive"
-                        ]
-                    },
-                    "stream_info": {
-                        "id": "stream_info",
-                        "title": "Stream Information",
-                        "description": "Information about this stream and Neuro-Sama",
-                        "content": [
-                            "This is a simulation of Neuro-Sama, an AI VTuber",
-                            "The stream is meant for entertainment and experimentation",
-                            "Viewers can interact with Neuro-Sama through chat"
-                        ]
-                    }
-                }
-            }
+            self.core_memory = {"blocks": {}}
             await self._save_core_memory()
 
         # Load context history
@@ -101,270 +68,137 @@ class MemoryManager:
         else:
             self.context_history = []
 
-        # Load temp memory (frequently changed by agent)
+        # Load temp memory
         if os.path.exists(self.temp_memory_file):
             with open(self.temp_memory_file, 'r', encoding='utf-8') as f:
                 self.temp_memory = json.load(f)
+        else:
+            self.temp_memory = []
 
-        print("Memory manager initialized with all memory types")
+        logger.info("Agent memory manager initialized.")
 
     async def _save_init_memory(self):
-        """Save init memory to file"""
         with open(self.init_memory_file, 'w', encoding='utf-8') as f:
             json.dump(self.init_memory, f, ensure_ascii=False, indent=2)
 
     async def update_init_memory(self, new_memory: Dict[str, Any]):
-        """Update init memory with new values"""
         self.init_memory.update(new_memory)
         await self._save_init_memory()
 
     async def _save_core_memory(self):
-        """Save core memory to file"""
         with open(self.core_memory_file, 'w', encoding='utf-8') as f:
             json.dump(self.core_memory, f, ensure_ascii=False, indent=2)
 
     async def _save_context(self):
-        """Save context to file"""
         with open(self.context_file, 'w', encoding='utf-8') as f:
             json.dump(self.context_history, f, ensure_ascii=False, indent=2)
 
     async def _save_temp_memory(self):
-        """Save temp memory to file"""
         with open(self.temp_memory_file, 'w', encoding='utf-8') as f:
             json.dump(self.temp_memory, f, ensure_ascii=False, indent=2)
 
     async def add_context_entry(self, role: str, content: str):
-        """Add an entry to context"""
-        entry = {
-            "id": generate_id(),
-            "role": role,  # "user" or "assistant"
-            "content": content,
-            "timestamp": datetime.now().isoformat()
-        }
+        entry = {"id": generate_id(), "role": role, "content": content, "timestamp": datetime.now().isoformat()}
         self.context_history.append(entry)
-
-        # Keep only last 20 context entries (10 rounds)
-        if len(self.context_history) > 20:
-            self.context_history = self.context_history[-20:]
-
         await self._save_context()
 
     async def add_detailed_context_entry(self, input_messages: List[Dict[str, str]],
                                          prompt: str, llm_response: str,
                                          tool_executions: List[Dict[str, Any]],
-                                         final_response: str,
-                                         entry_id: str = None):
-        """Add or update a detailed context entry with full LLM interaction details"""
-        # Check if we're updating an existing entry
+                                         final_response: str, entry_id: str = None):
+        update_data = {
+            "input_messages": input_messages, "prompt": prompt, "llm_response": llm_response,
+            "tool_executions": tool_executions, "final_response": final_response,
+            "timestamp": datetime.now().isoformat()
+        }
         if entry_id:
-            # Find the entry with the given ID and update it
             for entry in self.context_history:
                 if entry.get("id") == entry_id:
-                    entry.update({
-                        "input_messages": input_messages,
-                        "prompt": prompt,
-                        "llm_response": llm_response,
-                        "tool_executions": tool_executions,
-                        "final_response": final_response,
-                        "timestamp": datetime.now().isoformat()
-                    })
+                    entry.update(update_data)
                     await self._save_context()
                     return entry_id
 
-        # If no entry_id was provided or the entry wasn't found, create a new one
-        entry = {
-            "id": entry_id or generate_id(),
-            "type": "llm_interaction",
-            "role": "assistant",  # Add role for llm_interaction entries
-            "input_messages": input_messages,
-            "prompt": prompt,
-            "llm_response": llm_response,
-            "tool_executions": tool_executions,
-            "final_response": final_response,
-            "timestamp": datetime.now().isoformat()
-        }
-        self.context_history.append(entry)
-
-        # Keep only last 20 context entries
-        if len(self.context_history) > 20:
-            self.context_history = self.context_history[-20:]
-
+        new_entry = {"id": entry_id or generate_id(), "type": "llm_interaction", "role": "assistant", **update_data}
+        self.context_history.append(new_entry)
         await self._save_context()
-        return entry["id"]
+        return new_entry["id"]
 
-    async def get_recent_context(self, rounds: int = 5) -> List[Dict[str, Any]]:
-        """Get recent context (default: last 5 rounds, 10 entries)"""
-        # Each round consists of user message and assistant response
-        entries_needed = rounds * 2
-        return self.context_history[-entries_needed:] if self.context_history else []
+    async def get_recent_context(self, entries: int = 10) -> List[Dict[str, Any]]:
+        return self.context_history[-entries:]
 
     async def get_detailed_context_history(self) -> List[Dict[str, Any]]:
-        """Get the full detailed context history"""
         return self.context_history
 
     async def get_last_agent_response(self) -> Optional[str]:
-        """Get the last response from the agent"""
         for entry in reversed(self.context_history):
-            if entry.get("role") == "assistant":
-                return entry.get("content")
-            elif entry.get("type") == "llm_interaction":
-                return entry.get("final_response")
+            if entry.get("type") == "llm_interaction":
+                final_response = entry.get("final_response", "")
+                if final_response and final_response not in ["Processing started", "Prompt sent to LLM", "LLM response received"]:
+                    return final_response
+            elif entry.get("role") == "assistant":
+                content = entry.get("content", "")
+                if content and content != "Processing started":
+                    return content
         return None
 
     async def reset_context(self):
-        """Reset context"""
         self.context_history = []
         await self._save_context()
 
     async def reset_temp_memory(self):
-        """Reset only temp memory to default values from example files"""
-        # Load default temp memory from example
-        example_temp_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
-                                         "..", "docs", "working_dir_example", "agent", "memory", "temp_memory.json")
-        if os.path.exists(example_temp_path):
-            with open(example_temp_path, 'r', encoding='utf-8') as f:
-                self.temp_memory = json.load(f)
-        else:
-            # Fallback to empty list with one test entry if example file not found
-            self.temp_memory = [
-                {
-                    "id": "0test0",
-                    "content": "This is a test temp_memory.",
-                    "role": "Vedal987",
-                    "timestamp": "2024-12-24T00:00:00.000000"
-                }
-            ]
-
-        # Save only temp memory
+        """Reset temp memory to a default empty state."""
+        self.temp_memory = []
         await self._save_temp_memory()
-
-        print("Temp memory has been reset to default values from example files")
+        logger.info("Agent temp memory has been reset.")
 
     async def get_full_context(self) -> str:
-        """Get all memory as context for LLM"""
-        context_parts = []
-
-        # Add init memory
-        context_parts.append("=== INIT MEMORY (Immutable) ===")
-        for key, value in self.init_memory.items():
-            context_parts.append(f"{key}: {value}")
-
-        # Add core memory
+        context_parts = ["=== INIT MEMORY (Immutable) ===", json.dumps(self.init_memory, indent=2)]
         context_parts.append("\n=== CORE MEMORY (Long-term, Mutable) ===")
         if "blocks" in self.core_memory:
             for block_id, block in self.core_memory["blocks"].items():
-                context_parts.append(f"\nBlock: {block['title']} ({block_id})")
-                context_parts.append(f"Description: {block['description']}")
+                context_parts.append(f"\nBlock: {block.get('title', '')} ({block_id})")
+                context_parts.append(f"Description: {block.get('description', '')}")
                 context_parts.append("Content:")
-                for item in block["content"]:
+                for item in block.get("content", []):
                     context_parts.append(f"  - {item}")
-
-        # Add context (recent conversation history)
-        context_parts.append("\n=== CONTEXT (Recent Conversation) ===")
-        recent_context = await self.get_recent_context(5)
-        for i, entry in enumerate(recent_context):
-            # Handle entries with and without 'role' field
-            if "role" in entry:
-                role_display = "User" if entry["role"] == "user" else "Assistant"
-                content = entry.get('content', entry.get('final_response', 'Unknown entry'))
-                context_parts.append(f"{i+1}. [{role_display}] {content}")
-            elif "type" in entry and entry["type"] == "llm_interaction":
-                # For detailed LLM interaction entries with role: assistant
-                if entry.get("role") == "assistant":
-                    context_parts.append(f"{i+1}. [Assistant] {entry.get('final_response', 'Processing step')}")
-                else:
-                    # For other llm_interaction entries without role
-                    context_parts.append(f"{i+1}. [System] {entry.get('final_response', 'Processing step')}")
-            else:
-                # Default fallback
-                context_parts.append(f"{i+1}. [System] {entry.get('content', 'Unknown entry')}")
-
-        # Add temp memory (only for temporary state, not dialog history)
         if self.temp_memory:
             context_parts.append("\n=== TEMP MEMORY (Processing State) ===")
             for item in self.temp_memory:
                 context_parts.append(f"[{item.get('role', 'system')}] {item.get('content', '')}")
-
         return "\n".join(context_parts)
 
     async def add_temp_memory(self, content: str, role: str = "system"):
-        """Add an item to temp memory (for temporary processing state)"""
-        self.temp_memory.append({
-            "id": generate_id(),
-            "content": content,
-            "role": role,
-            "timestamp": datetime.now().isoformat()
-        })
-
-        # Keep only last 20 temp items
+        self.temp_memory.append({"id": generate_id(), "content": content, "role": role, "timestamp": datetime.now().isoformat()})
         if len(self.temp_memory) > 20:
             self.temp_memory = self.temp_memory[-20:]
-
         await self._save_temp_memory()
 
-    # Core memory management methods
     async def get_core_memory_blocks(self) -> Dict[str, Any]:
-        """Get all core memory blocks"""
         return self.core_memory.get("blocks", {})
 
     async def get_core_memory_block(self, block_id: str) -> Optional[Dict[str, Any]]:
-        """Get a specific core memory block"""
-        blocks = self.core_memory.get("blocks", {})
-        return blocks.get(block_id)
+        return self.core_memory.get("blocks", {}).get(block_id)
 
-    async def create_core_memory_block(self, title: str, description: str, content: List[str]):
-        """Create a new core memory block with a generated ID"""
+    async def create_core_memory_block(self, title: str, description: str, content: List[str]) -> str:
         block_id = generate_id()
-
         if "blocks" not in self.core_memory:
             self.core_memory["blocks"] = {}
-
         self.core_memory["blocks"][block_id] = {
-            "id": block_id,
-            "title": title,
-            "description": description,
-            "content": content if content else []
+            "id": block_id, "title": title, "description": description, "content": content or []
         }
-
         await self._save_core_memory()
-        return block_id  # Return the generated ID
+        return block_id
 
-    async def update_core_memory_block(self, block_id: str, title: str = None, description: str = None, content: List[str] = None):
-        """Update a core memory block"""
-        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
+    async def update_core_memory_block(self, block_id: str, title: Optional[str] = None, description: Optional[str] = None, content: Optional[List[str]] = None):
+        block = self.core_memory.get("blocks", {}).get(block_id)
+        if not block:
             raise ValueError(f"Block '{block_id}' not found")
-
-        block = self.core_memory["blocks"][block_id]
-        if title is not None:
-            block["title"] = title
-        if description is not None:
-            block["description"] = description
-        if content is not None:
-            block["content"] = content
-
+        if title is not None: block["title"] = title
+        if description is not None: block["description"] = description
+        if content is not None: block["content"] = content
        await self._save_core_memory()
 
     async def delete_core_memory_block(self, block_id: str):
-        """Delete a core memory block"""
         if "blocks" in self.core_memory and block_id in self.core_memory["blocks"]:
             del self.core_memory["blocks"][block_id]
-            await self._save_core_memory()
-
-    async def add_to_core_memory_block(self, block_id: str, item: str):
-        """Add an item to a core memory block"""
-        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
-            raise ValueError(f"Block '{block_id}' not found")
-
-        self.core_memory["blocks"][block_id]["content"].append(item)
-        await self._save_core_memory()
-
-    async def remove_from_core_memory_block(self, block_id: str, index: int):
-        """Remove an item from a core memory block by index"""
-        if "blocks" not in self.core_memory or block_id not in self.core_memory["blocks"]:
-            raise ValueError(f"Block '{block_id}' not found")
-
-        if 0 <= index < len(self.core_memory["blocks"][block_id]["content"]):
-            self.core_memory["blocks"][block_id]["content"].pop(index)
-            await self._save_core_memory()
-        else:
-            raise IndexError(f"Index {index} out of range for block '{block_id}'")
+        await self._save_core_memory()
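A minimal sketch of the slimmed-down MemoryManager API after this refactor (hypothetical working_dir; method names and signatures as defined in the diff above):

    import asyncio

    from neuro_simulator.agent.memory.manager import MemoryManager

    async def demo():
        # Persists its JSON files under <working_dir>/agent/memory/
        mm = MemoryManager(working_dir="/tmp/neuro")
        await mm.initialize()
        block_id = await mm.create_core_memory_block(
            title="Stream Info",
            description="Facts about the current stream",
            content=["Viewers interact with Neuro-Sama through chat"],
        )
        await mm.add_context_entry("user", "hi Neuro")
        print(block_id)
        print(await mm.get_full_context())  # includes the new block and entry

    asyncio.run(demo())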
neuro_simulator/agent/tools/__init__.py
@@ -1,4 +1 @@
-# agent/tools/__init__.py
-"""
-Tools module for the Neuro Simulator Agent
-"""
+# neuro_simulator.agent.tools package