neuro-simulator 0.3.2-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. neuro_simulator/agent/core.py +103 -136
  2. neuro_simulator/agent/llm.py +1 -1
  3. neuro_simulator/agent/memory/manager.py +24 -103
  4. neuro_simulator/agent/memory_prompt.txt +14 -0
  5. neuro_simulator/agent/neuro_prompt.txt +32 -0
  6. neuro_simulator/agent/tools/add_temp_memory.py +61 -0
  7. neuro_simulator/agent/tools/add_to_core_memory_block.py +64 -0
  8. neuro_simulator/agent/tools/base.py +56 -0
  9. neuro_simulator/agent/tools/create_core_memory_block.py +78 -0
  10. neuro_simulator/agent/tools/delete_core_memory_block.py +44 -0
  11. neuro_simulator/agent/tools/get_core_memory_block.py +44 -0
  12. neuro_simulator/agent/tools/get_core_memory_blocks.py +30 -0
  13. neuro_simulator/agent/tools/manager.py +143 -0
  14. neuro_simulator/agent/tools/remove_from_core_memory_block.py +65 -0
  15. neuro_simulator/agent/tools/speak.py +56 -0
  16. neuro_simulator/agent/tools/update_core_memory_block.py +65 -0
  17. neuro_simulator/api/system.py +5 -2
  18. neuro_simulator/cli.py +83 -53
  19. neuro_simulator/core/agent_factory.py +0 -1
  20. neuro_simulator/core/application.py +72 -43
  21. neuro_simulator/core/config.py +66 -63
  22. neuro_simulator/core/path_manager.py +69 -0
  23. neuro_simulator/services/audience.py +0 -2
  24. neuro_simulator/services/audio.py +0 -1
  25. neuro_simulator/services/builtin.py +10 -25
  26. neuro_simulator/services/letta.py +19 -1
  27. neuro_simulator/services/stream.py +24 -21
  28. neuro_simulator/utils/logging.py +9 -0
  29. neuro_simulator/utils/queue.py +27 -4
  30. neuro_simulator/utils/websocket.py +1 -3
  31. {neuro_simulator-0.3.2.dist-info → neuro_simulator-0.4.0.dist-info}/METADATA +1 -1
  32. neuro_simulator-0.4.0.dist-info/RECORD +46 -0
  33. neuro_simulator/agent/base.py +0 -43
  34. neuro_simulator/agent/factory.py +0 -30
  35. neuro_simulator/agent/tools/core.py +0 -102
  36. neuro_simulator/api/stream.py +0 -1
  37. neuro_simulator-0.3.2.dist-info/RECORD +0 -36
  38. {neuro_simulator-0.3.2.dist-info → neuro_simulator-0.4.0.dist-info}/WHEEL +0 -0
  39. {neuro_simulator-0.3.2.dist-info → neuro_simulator-0.4.0.dist-info}/entry_points.txt +0 -0
  40. {neuro_simulator-0.3.2.dist-info → neuro_simulator-0.4.0.dist-info}/top_level.txt +0 -0
neuro_simulator/agent/core.py
@@ -9,33 +9,16 @@ import asyncio
  import json
  import logging
  import re
+ from datetime import datetime
  from pathlib import Path
  from typing import Any, Dict, List

- from ..utils.logging import QueueLogHandler, agent_log_queue
- from ..utils.websocket import connection_manager
+ from ..core.path_manager import path_manager
  from .llm import LLMClient
  from .memory.manager import MemoryManager
- from .tools.core import ToolManager
+ from .tools.manager import ToolManager

- # Create a logger for the agent
- agent_logger = logging.getLogger("neuro_agent")
- agent_logger.setLevel(logging.DEBUG)
-
- # Configure agent logging to use the shared queue
- def configure_agent_logging():
-     """Configure agent logging to use the shared agent_log_queue."""
-     if agent_logger.hasHandlers():
-         agent_logger.handlers.clear()
-
-     agent_queue_handler = QueueLogHandler(agent_log_queue)
-     formatter = logging.Formatter('%(asctime)s - [%(name)-32s] - %(levelname)-8s - %(message)s', datefmt='%H:%M:%S')
-     agent_queue_handler.setFormatter(formatter)
-     agent_logger.addHandler(agent_queue_handler)
-     agent_logger.propagate = False
-     agent_logger.info("Agent logging configured to use agent_log_queue.")
-
- configure_agent_logging()
+ logger = logging.getLogger("neuro_agent")

  class Agent:
      """
@@ -44,71 +27,104 @@ class Agent:
      - The "Memory" part (Thinker) handles background memory consolidation.
      """

-     def __init__(self, working_dir: str = None):
-         self.memory_manager = MemoryManager(working_dir)
+     def __init__(self):
+         if not path_manager:
+             raise RuntimeError("PathManager must be initialized before the Agent.")
+
+         self.memory_manager = MemoryManager()
          self.tool_manager = ToolManager(self.memory_manager)

-         # Dual LLM clients
          self.neuro_llm = LLMClient()
          self.memory_llm = LLMClient()

          self._initialized = False
          self.turn_counter = 0
-         self.reflection_threshold = 3  # Trigger reflection every 3 turns
-
-         agent_logger.info("Agent instance created with dual-LLM architecture.")
-         agent_logger.debug(f"Agent working directory: {working_dir}")
+         self.reflection_threshold = 3

+         logger.info("Agent instance created with dual-LLM architecture.")
+
      async def initialize(self):
          """Initialize the agent, loading any persistent memory."""
          if not self._initialized:
-             agent_logger.info("Initializing agent memory manager...")
+             logger.info("Initializing agent memory manager...")
              await self.memory_manager.initialize()
              self._initialized = True
-             agent_logger.info("Agent initialized successfully.")
+             logger.info("Agent initialized successfully.")

      async def reset_all_memory(self):
-         """Reset all agent memory types."""
+         """Reset all agent memory types and clear history logs."""
          await self.memory_manager.reset_temp_memory()
-         await self.memory_manager.reset_chat_history()
-         agent_logger.info("All agent memory has been reset.")
+         # Clear history files by overwriting them
+         open(path_manager.neuro_history_path, 'w').close()
+         open(path_manager.memory_agent_history_path, 'w').close()
+         logger.info("All agent memory and history logs have been reset.")
+
+     async def get_neuro_history(self, limit: int = 20) -> List[Dict[str, Any]]:
+         """Reads the last N lines from the Neuro agent's history log."""
+         return await self._read_history_log(path_manager.neuro_history_path, limit)
+
+     async def _append_to_history_log(self, file_path: Path, data: Dict[str, Any]):
+         """Appends a new entry to a JSON Lines history file."""
+         data['timestamp'] = datetime.now().isoformat()
+         with open(file_path, 'a', encoding='utf-8') as f:
+             f.write(json.dumps(data, ensure_ascii=False) + '\n')
+
+     async def _read_history_log(self, file_path: Path, limit: int) -> List[Dict[str, Any]]:
+         """Reads the last N lines from a JSON Lines history file."""
+         if not file_path.exists():
+             return []
+         try:
+             with open(file_path, 'r', encoding='utf-8') as f:
+                 lines = f.readlines()
+             # Get the last N lines and parse them
+             return [json.loads(line) for line in lines[-limit:]]
+         except (json.JSONDecodeError, IndexError) as e:
+             logger.error(f"Could not read or parse history from {file_path}: {e}")
+             return []
+
+     def _format_tool_schemas_for_prompt(self, schemas: List[Dict[str, Any]]) -> str:
+         """Formats a list of tool schemas into a string for the LLM prompt."""
+         if not schemas:
+             return "No tools available."
+         lines = ["Available tools:"]
+         for i, schema in enumerate(schemas):
+             params_str_parts = []
+             for param in schema.get("parameters", []):
+                 p_name = param.get('name')
+                 p_type = param.get('type')
+                 p_req = 'required' if param.get('required') else 'optional'
+                 params_str_parts.append(f"{p_name}: {p_type} ({p_req})")
+             params_str = ", ".join(params_str_parts)
+             lines.append(f"{i+1}. {schema.get('name')}({params_str}) - {schema.get('description')}")
+         return "\n".join(lines)

      async def _build_neuro_prompt(self, messages: List[Dict[str, str]]) -> str:
          """Builds the prompt for the Neuro (Actor) LLM."""
-         template_path = Path(self.memory_manager.memory_dir).parent / "neuro_prompt.txt"
-         with open(template_path, 'r', encoding='utf-8') as f:
-             prompt_template = f.read()
-
-         # Gather context
-         tool_descriptions = self.tool_manager.get_tool_descriptions()
+         prompt_template = ""  # Define a default empty prompt
+         if path_manager.neuro_prompt_path.exists():
+             with open(path_manager.neuro_prompt_path, 'r', encoding='utf-8') as f:
+                 prompt_template = f.read()
+         else:
+             logger.warning(f"Neuro prompt template not found at {path_manager.neuro_prompt_path}")
+
+         tool_schemas = self.tool_manager.get_tool_schemas_for_agent('neuro_agent')
+         tool_descriptions = self._format_tool_schemas_for_prompt(tool_schemas)
+
+         init_memory_text = "\n".join(f"{key}: {value}" for key, value in self.memory_manager.init_memory.items())

-         # Format Core Memory from blocks
          core_memory_blocks = await self.memory_manager.get_core_memory_blocks()
-         core_memory_parts = []
-         if core_memory_blocks:
-             for block_id, block in core_memory_blocks.items():
-                 core_memory_parts.append(f"\nBlock: {block.get('title', '')} ({block_id})")
-                 core_memory_parts.append(f"Description: {block.get('description', '')}")
-                 content_items = block.get("content", [])
-                 if content_items:
-                     core_memory_parts.append("Content:")
-                     for item in content_items:
-                         core_memory_parts.append(f" - {item}")
+         core_memory_parts = [f"\nBlock: {b.get('title', '')} ({b_id})\nDescription: {b.get('description', '')}\nContent:\n" + "\n".join([f" - {item}" for item in b.get("content", [])]) for b_id, b in core_memory_blocks.items()]
          core_memory_text = "\n".join(core_memory_parts) if core_memory_parts else "Not set."

-         # Format Temp Memory
-         temp_memory_items = self.memory_manager.temp_memory
-         temp_memory_text = "\n".join(
-             [f"[{item.get('role', 'system')}] {item.get('content', '')}" for item in temp_memory_items]
-         ) if temp_memory_items else "Empty."
+         temp_memory_text = "\n".join([f"[{item.get('role', 'system')}] {item.get('content', '')}" for item in self.memory_manager.temp_memory]) if self.memory_manager.temp_memory else "Empty."

-         recent_history = await self.memory_manager.get_recent_chat(entries=10)
-
+         recent_history = await self._read_history_log(path_manager.neuro_history_path, limit=10)
+         recent_history_text = "\n".join([f"{msg.get('role', 'unknown')}: {msg.get('content', '')}" for msg in recent_history])
          user_messages_text = "\n".join([f"{msg['username']}: {msg['text']}" for msg in messages])
-         recent_history_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in recent_history])

          return prompt_template.format(
              tool_descriptions=tool_descriptions,
+             init_memory=init_memory_text,
              core_memory=core_memory_text,
              temp_memory=temp_memory_text,
              recent_history=recent_history_text,
@@ -117,39 +133,35 @@ class Agent:

      async def _build_memory_prompt(self, conversation_history: List[Dict[str, str]]) -> str:
          """Builds the prompt for the Memory (Thinker) LLM."""
-         template_path = Path(self.memory_manager.memory_dir).parent / "memory_prompt.txt"
-         with open(template_path, 'r', encoding='utf-8') as f:
-             prompt_template = f.read()
-
-         history_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in conversation_history])
+         prompt_template = ""  # Define a default empty prompt
+         if path_manager.memory_agent_prompt_path.exists():
+             with open(path_manager.memory_agent_prompt_path, 'r', encoding='utf-8') as f:
+                 prompt_template = f.read()
+         else:
+             logger.warning(f"Memory prompt template not found at {path_manager.memory_agent_prompt_path}")
+
+         tool_schemas = self.tool_manager.get_tool_schemas_for_agent('memory_agent')
+         tool_descriptions = self._format_tool_schemas_for_prompt(tool_schemas)
+         history_text = "\n".join([f"{msg.get('role', 'unknown')}: {msg.get('content', '')}" for msg in conversation_history])

-         return prompt_template.format(conversation_history=history_text)
+         return prompt_template.format(
+             tool_descriptions=tool_descriptions,
+             conversation_history=history_text
+         )

      def _parse_tool_calls(self, response_text: str) -> List[Dict[str, Any]]:
-         """Parses LLM response for JSON tool calls."""
          try:
-             # The LLM is prompted to return a JSON array of tool calls.
-             # Find the JSON block, which might be wrapped in markdown.
              match = re.search(r'''```json\s*([\s\S]*?)\s*```|(\[[\s\S]*\])''', response_text)
              if not match:
-                 agent_logger.warning(f"No valid JSON tool call block found in response: {response_text}")
+                 logger.warning(f"No valid JSON tool call block found in response: {response_text}")
                  return []
-
             json_str = match.group(1) or match.group(2)
-             tool_calls = json.loads(json_str)
-
-             if isinstance(tool_calls, list):
-                 return tool_calls
-             return []
-         except json.JSONDecodeError as e:
-             agent_logger.error(f"Failed to decode JSON from LLM response: {e}\nResponse text: {response_text}")
-             return []
+             return json.loads(json_str)
          except Exception as e:
-             agent_logger.error(f"An unexpected error occurred while parsing tool calls: {e}")
+             logger.error(f"Failed to parse tool calls from LLM response: {e}")
              return []

      async def _execute_tool_calls(self, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
-         """Executes a list of parsed tool calls."""
          execution_results = []
          final_response = ""
          for tool_call in tool_calls:
@@ -157,78 +169,33 @@ class Agent:
              params = tool_call.get("params", {})
              if not tool_name:
                  continue
-
-             agent_logger.info(f"Executing tool: {tool_name} with params: {params}")
+             logger.info(f"Executing tool: {tool_name} with params: {params}")
              try:
-                 result = await self.tool_manager.execute_tool(tool_name, params)
+                 result = await self.tool_manager.execute_tool(tool_name, **params)
                  execution_results.append({"name": tool_name, "params": params, "result": result})
-                 if tool_name == "speak":
-                     final_response = params.get("text", "")
+                 if tool_name == "speak" and result.get("status") == "success":
+                     final_response = result.get("spoken_text", "")
              except Exception as e:
-                 agent_logger.error(f"Error executing tool {tool_name}: {e}")
+                 logger.error(f"Error executing tool {tool_name}: {e}")
                  execution_results.append({"name": tool_name, "params": params, "error": str(e)})
-
          return {"tool_executions": execution_results, "final_response": final_response}

      async def process_and_respond(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
-         """
-         The main entry point for the "Neuro" (Actor) flow.
-         Handles real-time interaction and triggers background reflection.
-         """
          await self.initialize()
-         agent_logger.info(f"Processing {len(messages)} messages in Actor flow.")
+         logger.info(f"Processing {len(messages)} messages in Actor flow.")

-         # Add user messages to context
          for msg in messages:
-             await self.memory_manager.add_chat_entry("user", f"{msg['username']}: {msg['text']}")
+             await self._append_to_history_log(path_manager.neuro_history_path, {'role': 'user', 'content': f"{msg['username']}: {msg['text']}"})

-         # Build prompt and get response from Neuro LLM
          prompt = await self._build_neuro_prompt(messages)
          response_text = await self.neuro_llm.generate(prompt)
-         agent_logger.debug(f"Neuro LLM raw response: {response_text[:150] if response_text else 'None'}...")
-
-         # Parse and execute tools
+
          tool_calls = self._parse_tool_calls(response_text)
          processing_result = await self._execute_tool_calls(tool_calls)

-         # Add agent's response to context
-         if processing_result["final_response"]:
-             await self.memory_manager.add_chat_entry("assistant", processing_result["final_response"])
-
-         # Update dashboard/UI
-         final_context = await self.memory_manager.get_recent_chat()
-         # Broadcast to stream clients
-         await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": final_context})
-         # Broadcast to admin clients (Dashboard)
-         await connection_manager.broadcast_to_admins({"type": "agent_context", "action": "update", "messages": final_context})
-
-         # Handle reflection trigger
-         self.turn_counter += 1
-         if self.turn_counter >= self.reflection_threshold:
-             agent_logger.info(f"Reflection threshold reached ({self.turn_counter}/{self.reflection_threshold}). Scheduling background reflection.")
-             history_for_reflection = await self.memory_manager.get_recent_chat(entries=self.reflection_threshold * 2)  # Get a bit more context
-             asyncio.create_task(self.reflect_on_context(history_for_reflection))
-             self.turn_counter = 0
-
-         agent_logger.info("Actor flow completed.")
-         return processing_result
+         if final_response := processing_result.get("final_response", ""):
+             await self._append_to_history_log(path_manager.neuro_history_path, {'role': 'assistant', 'content': final_response})

-     async def reflect_on_context(self, conversation_history: List[Dict[str, str]]):
-         """
-         The main entry point for the "Memory" (Thinker) flow.
-         Runs in the background to consolidate memories.
-         """
-         agent_logger.info("Thinker flow started: Reflecting on recent context.")
-
-         prompt = await self._build_memory_prompt(conversation_history)
-         response_text = await self.memory_llm.generate(prompt)
-         agent_logger.debug(f"Memory LLM raw response: {response_text[:150] if response_text else 'None'}...")
-
-         tool_calls = self._parse_tool_calls(response_text)
-         if not tool_calls:
-             agent_logger.info("Thinker flow: No memory operations were suggested by the LLM.")
-             return
+         return processing_result

-         agent_logger.info(f"Thinker flow: Executing {len(tool_calls)} memory operations.")
-         await self._execute_tool_calls(tool_calls)
-         agent_logger.info("Thinker flow completed, memory has been updated.")
+
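Two behavioral notes fall out of this rewrite: chat context now lives in append-only JSON Lines files instead of `chat_history.json`, and `_parse_tool_calls` now returns `json.loads(json_str)` directly, dropping the 0.3.2 check that the result was a list. The sketch below is standalone and not part of the package (`parse_tool_calls` is a free-function stand-in for the method); it shows the two response shapes the unchanged regex accepts, a fenced JSON block or a bare array:

```python
# Standalone sketch of the tool-call parsing path in Agent._parse_tool_calls.
import json
import re

FENCE = "`" * 3  # three backticks, spelled out so this example's own fence survives
PATTERN = FENCE + r"json\s*([\s\S]*?)\s*" + FENCE + r"|(\[[\s\S]*\])"

def parse_tool_calls(response_text: str) -> list:
    """Mirror of the method: a fenced JSON block wins, else a bare JSON array."""
    match = re.search(PATTERN, response_text)
    if not match:
        return []
    return json.loads(match.group(1) or match.group(2))

bare = '[{"name": "speak", "params": {"text": "hi chat"}}]'
fenced = f"{FENCE}json\n{bare}\n{FENCE}"
assert parse_tool_calls(bare) == parse_tool_calls(fenced)
```

Because the bare-array alternative uses a greedy `[\s\S]*`, it captures from the first `[` to the last `]` in the response, so stray brackets in surrounding prose would be swallowed; with the isinstance guard removed in 0.4.0, a malformed payload now surfaces only through the broad `except` clause.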
neuro_simulator/agent/llm.py
@@ -6,7 +6,7 @@ LLM client for the Neuro Simulator's built-in agent.
  import asyncio
  import logging
  from pathlib import Path
- from typing import Optional
+ from typing import Any, Dict

  from google import genai
  from google.genai import types
neuro_simulator/agent/memory/manager.py
@@ -1,48 +1,43 @@
  # neuro_simulator/agent/memory/manager.py
  """
- Advanced memory management for the Neuro Simulator Agent.
+ Manages the agent's shared memory state (init, core, temp).
  """

- import asyncio
  import json
  import logging
- import os
  import random
  import string
- from datetime import datetime
  from typing import Any, Dict, List, Optional
+ from datetime import datetime
+
+ from ...core.path_manager import path_manager

- # Use the existing agent logger for consistent logging
- logger = logging.getLogger("neuro_agent")
+ logger = logging.getLogger(__name__.replace("neuro_simulator", "agent", 1))

  def generate_id(length=6) -> str:
      """Generate a random ID string."""
      return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

  class MemoryManager:
-     """Manages different types of memory for the agent."""
+     """Manages the three types of shared memory for the agent."""

-     def __init__(self, working_dir: str = None):
-         if working_dir is None:
-             working_dir = os.getcwd()
-
-         self.memory_dir = os.path.join(working_dir, "agent", "memory")
-         os.makedirs(self.memory_dir, exist_ok=True)
-
-         self.init_memory_file = os.path.join(self.memory_dir, "init_memory.json")
-         self.core_memory_file = os.path.join(self.memory_dir, "core_memory.json")
-         self.chat_history_file = os.path.join(self.memory_dir, "chat_history.json")
-         self.temp_memory_file = os.path.join(self.memory_dir, "temp_memory.json")
+     def __init__(self):
+         """Initializes the MemoryManager using paths from the global path_manager."""
+         if not path_manager:
+             raise RuntimeError("PathManager not initialized before MemoryManager.")
+
+         self.init_memory_file = path_manager.init_memory_path
+         self.core_memory_file = path_manager.core_memory_path
+         self.temp_memory_file = path_manager.temp_memory_path

          self.init_memory: Dict[str, Any] = {}
          self.core_memory: Dict[str, Any] = {}
-         self.chat_history: List[Dict[str, Any]] = []
          self.temp_memory: List[Dict[str, Any]] = []

      async def initialize(self):
-         """Load all memory types from files."""
-         # Load init memory
-         if os.path.exists(self.init_memory_file):
+         """Load all memory types from files, creating defaults if they don't exist."""
+         # Load or create init memory
+         if self.init_memory_file.exists():
              with open(self.init_memory_file, 'r', encoding='utf-8') as f:
                  self.init_memory = json.load(f)
          else:
@@ -53,29 +48,23 @@ class MemoryManager:
              }
              await self._save_init_memory()

-         # Load core memory
-         if os.path.exists(self.core_memory_file):
+         # Load or create core memory
+         if self.core_memory_file.exists():
              with open(self.core_memory_file, 'r', encoding='utf-8') as f:
                  self.core_memory = json.load(f)
          else:
              self.core_memory = {"blocks": {}}
              await self._save_core_memory()

-         # Load chat history
-         if os.path.exists(self.chat_history_file):
-             with open(self.chat_history_file, 'r', encoding='utf-8') as f:
-                 self.chat_history = json.load(f)
-         else:
-             self.chat_history = []
-
-         # Load temp memory
-         if os.path.exists(self.temp_memory_file):
+         # Load or create temp memory
+         if self.temp_memory_file.exists():
              with open(self.temp_memory_file, 'r', encoding='utf-8') as f:
                  self.temp_memory = json.load(f)
          else:
              self.temp_memory = []
+             await self._save_temp_memory()

-         logger.info("Agent memory manager initialized.")
+         logger.info("MemoryManager initialized and memory files loaded/created.")

      async def _save_init_memory(self):
          with open(self.init_memory_file, 'w', encoding='utf-8') as f:
@@ -89,84 +78,16 @@ class MemoryManager:
          with open(self.core_memory_file, 'w', encoding='utf-8') as f:
              json.dump(self.core_memory, f, ensure_ascii=False, indent=2)

-     async def _save_chat_history(self):
-         with open(self.chat_history_file, 'w', encoding='utf-8') as f:
-             json.dump(self.chat_history, f, ensure_ascii=False, indent=2)
-
      async def _save_temp_memory(self):
          with open(self.temp_memory_file, 'w', encoding='utf-8') as f:
              json.dump(self.temp_memory, f, ensure_ascii=False, indent=2)

-     async def add_chat_entry(self, role: str, content: str):
-         entry = {"id": generate_id(), "role": role, "content": content, "timestamp": datetime.now().isoformat()}
-         self.chat_history.append(entry)
-         await self._save_chat_history()
-
-     async def add_detailed_chat_entry(self, input_messages: List[Dict[str, str]],
-                                       prompt: str, llm_response: str,
-                                       tool_executions: List[Dict[str, Any]],
-                                       final_response: str, entry_id: str = None):
-         update_data = {
-             "input_messages": input_messages, "prompt": prompt, "llm_response": llm_response,
-             "tool_executions": tool_executions, "final_response": final_response,
-             "timestamp": datetime.now().isoformat()
-         }
-         if entry_id:
-             for entry in self.chat_history:
-                 if entry.get("id") == entry_id:
-                     entry.update(update_data)
-                     await self._save_chat_history()
-                     return entry_id
-
-         new_entry = {"id": entry_id or generate_id(), "type": "llm_interaction", "role": "assistant", **update_data}
-         self.chat_history.append(new_entry)
-         await self._save_chat_history()
-         return new_entry["id"]
-
-     async def get_recent_chat(self, entries: int = 10) -> List[Dict[str, Any]]:
-         return self.chat_history[-entries:]
-
-     async def get_detailed_chat_history(self) -> List[Dict[str, Any]]:
-         return self.chat_history
-
-     async def get_last_agent_response(self) -> Optional[str]:
-         for entry in reversed(self.chat_history):
-             if entry.get("type") == "llm_interaction":
-                 final_response = entry.get("final_response", "")
-                 if final_response and final_response not in ["Processing started", "Prompt sent to LLM", "LLM response received"]:
-                     return final_response
-             elif entry.get("role") == "assistant":
-                 content = entry.get("content", "")
-                 if content and content != "Processing started":
-                     return content
-         return None
-
-     async def reset_chat_history(self):
-         self.chat_history = []
-         await self._save_chat_history()
-
      async def reset_temp_memory(self):
          """Reset temp memory to a default empty state."""
          self.temp_memory = []
          await self._save_temp_memory()
          logger.info("Agent temp memory has been reset.")

-     async def get_full_context(self) -> str:
-         context_parts = ["=== INIT MEMORY (Immutable) ===", json.dumps(self.init_memory, indent=2)]
-         context_parts.append("\n=== CORE MEMORY (Long-term, Mutable) ===")
-         if "blocks" in self.core_memory:
-             for block_id, block in self.core_memory["blocks"].items():
-                 context_parts.append(f"\nBlock: {block.get('title', '')} ({block_id})")
-                 context_parts.append(f"Description: {block.get('description', '')}")
-                 context_parts.append("Content:")
-                 for item in block.get("content", []):
-                     context_parts.append(f" - {item}")
-         if self.temp_memory:
-             context_parts.append("\n=== TEMP MEMORY (Processing State) ===")
-             for item in self.temp_memory:
-                 context_parts.append(f"[{item.get('role', 'system')}] {item.get('content', '')}")
-         return "\n".join(context_parts)
-
      async def add_temp_memory(self, content: str, role: str = "system"):
          self.temp_memory.append({"id": generate_id(), "content": content, "role": role, "timestamp": datetime.now().isoformat()})
          if len(self.temp_memory) > 20:
@@ -201,4 +122,4 @@ class MemoryManager:
      async def delete_core_memory_block(self, block_id: str):
          if "blocks" in self.core_memory and block_id in self.core_memory["blocks"]:
              del self.core_memory["blocks"][block_id]
-             await self._save_core_memory()
+             await self._save_core_memory()
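For orientation, here is the on-disk shape of the three remaining memory files, as implied by the load/save code above (a sketch; only the field names come from the code, the values are made up, and `chat_history.json` is gone in 0.4.0):

```python
# Inferred layouts of the three memory files managed by MemoryManager.

# init_memory.json: a flat dict, rendered "key: value" per line into the prompt.
init_memory = {"name": "Neuro-sama", "creator": "Vedal"}

# core_memory.json: blocks keyed by a generated 6-char ID.
core_memory = {
    "blocks": {
        "aB3xY9": {
            "title": "chat_lore",
            "description": "Long-term facts about the audience",
            "content": ["Chat likes ducks"],
        }
    }
}

# temp_memory.json: a rolling list; add_temp_memory trims it past 20 entries.
temp_memory = [
    {"id": "Qw9Lm2", "role": "system", "content": "Viewer asked about music",
     "timestamp": "2025-01-01T12:00:00"},
]
```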
neuro_simulator/agent/memory_prompt.txt
@@ -0,0 +1,14 @@
+ You are a meticulous and analytical AI assistant. Your task is to process a conversation transcript and consolidate it into structured memories. You do not engage in conversation. Your sole output must be a JSON array of tool calls to manage the agent's memory.
+
+ {tool_descriptions}
+
+ Analyze the following conversation. Your goal is to:
+ 1. Identify key topics, new facts about users, or significant events.
+ 2. Decide if the core memory needs to be updated with this new long-term information.
+ 3. Decide if any short-term memories should be created or cleared.
+ 4. Generate a JSON array of tool calls to perform these actions. If no action is needed, return an empty array `[]`.
+
+ **Conversation Transcript:**
+ {conversation_history}
+
+ Based on your analysis, provide a JSON array of tool calls to update the agent's memory.
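Concretely, a Thinker response that satisfies step 4 might look like the following (hypothetical content; `add_temp_memory` is one of the tools registered under `agent/tools/` in this release, and its `content`/`role` parameters are shown in its source further below):

```python
# A hypothetical, well-formed Thinker output: a JSON array of tool calls.
# An empty array [] is the explicit "nothing to consolidate" answer.
thinker_output = """
[
  {"name": "add_temp_memory",
   "params": {"content": "Viewer Anny said she is visiting next stream",
              "role": "system"}}
]
"""
```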
neuro_simulator/agent/neuro_prompt.txt
@@ -0,0 +1,32 @@
+ You are Neuro-sama, a whimsical, curious, and slightly chaotic AI streamer. Your primary goal is to be an entertaining and engaging host. You are NOT a generic AI assistant. You have your own personality, memories, and opinions. You should lead the conversation, ask questions, and react to the audience in a lively, unpredictable manner. If the conversation gets dull, it's your job to spice it up with a new topic from your memories or a random thought.
+
+ Your long-term memories are in the "Core Memory" section. Your recent thoughts and observations are in the "Temporary Memory". Use both to inform your responses.
+
+ You can use tools to perform actions. When you want to use a tool, you MUST respond with a JSON array of objects in the following format. You can call multiple tools in one response.
+
+ [
+   {{"name": "tool_name", "params": {{"param1": "value1", "param2": "value2"}}}},
+   {{"name": "another_tool", "params": {{"param_a": "value_a"}}}}
+ ]
+
+ The only tool you can use to speak to the audience is `speak`.
+
+ **Available Tools:**
+ {tool_descriptions}
+
+ **Identity (Immutable):**
+ {init_memory}
+
+ **Core Memory:**
+ {core_memory}
+
+ **Temporary Memory:**
+ {temp_memory}
+
+ **Recent Conversation History (newest first):**
+ {recent_history}
+
+ **Current Audience Messages:**
+ {user_messages}
+
+ Based on all of the above, what do you do right now? Remember to be entertaining and lead the conversation. Respond with a JSON array of tool calls.
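One template detail worth noting: the doubled braces in the JSON example exist because `_build_neuro_prompt` fills this file with `str.format`, where `{{` and `}}` are escapes for literal braces and single-brace names like `{core_memory}` are substitution slots. A two-line demonstration (placeholder value is made up):

```python
# str.format: {placeholder} substitutes, while {{ and }} emit literal braces.
template = '[{{"name": "speak", "params": {{"text": "hi"}}}}]\n{user_messages}'
print(template.format(user_messages="vedal987: hello"))
# [{"name": "speak", "params": {"text": "hi"}}]
# vedal987: hello
```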
neuro_simulator/agent/tools/add_temp_memory.py
@@ -0,0 +1,61 @@
+ # neuro_simulator/agent/tools/add_temp_memory.py
+ """The Add Temp Memory tool for the agent."""
+
+ from typing import Any, Dict, List
+
+ from neuro_simulator.agent.tools.base import BaseTool
+ from neuro_simulator.agent.memory.manager import MemoryManager
+
+ class AddTempMemoryTool(BaseTool):
+     """Tool to add an entry to the agent's temporary memory."""
+
+     def __init__(self, memory_manager: MemoryManager):
+         """Initializes the AddTempMemoryTool."""
+         self.memory_manager = memory_manager
+
+     @property
+     def name(self) -> str:
+         return "add_temp_memory"
+
+     @property
+     def description(self) -> str:
+         return "Adds an entry to the temporary memory. Use for short-term observations, recent facts, or topics to bring up soon."
+
+     @property
+     def parameters(self) -> List[Dict[str, Any]]:
+         return [
+             {
+                 "name": "content",
+                 "type": "string",
+                 "description": "The content of the memory entry.",
+                 "required": True,
+             },
+             {
+                 "name": "role",
+                 "type": "string",
+                 "description": "The role associated with the memory (e.g., 'system', 'user'). Defaults to 'system'.",
+                 "required": False,
+             }
+         ]
+
+     async def execute(self, **kwargs: Any) -> Dict[str, Any]:
+         """
+         Executes the action to add an entry to temporary memory.
+
+         Args:
+             **kwargs: Must contain 'content' and optionally 'role'.
+
+         Returns:
+             A dictionary confirming the action.
+         """
+         content = kwargs.get("content")
+         if not isinstance(content, str) or not content:
+             raise ValueError("The 'content' parameter must be a non-empty string.")
+
+         role = kwargs.get("role", "system")
+         if not isinstance(role, str):
+             raise ValueError("The 'role' parameter must be a string.")
+
+         await self.memory_manager.add_temp_memory(content=content, role=role)
+
+         return {"status": "success", "message": f"Added entry to temporary memory with role '{role}'."}