neuro-simulator 0.4.4-py3-none-any.whl → 0.5.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- neuro_simulator/chatbot/__init__.py +0 -0
- neuro_simulator/chatbot/core.py +235 -0
- neuro_simulator/chatbot/llm.py +105 -0
- neuro_simulator/chatbot/memory/__init__.py +0 -0
- neuro_simulator/chatbot/memory/manager.py +90 -0
- neuro_simulator/chatbot/nickname_gen/__init__.py +0 -0
- neuro_simulator/chatbot/nickname_gen/generator.py +144 -0
- neuro_simulator/chatbot/prompts/__init__.py +0 -0
- neuro_simulator/chatbot/tools/__init__.py +0 -0
- neuro_simulator/chatbot/tools/add_temp_memory.py +51 -0
- neuro_simulator/chatbot/tools/base.py +47 -0
- neuro_simulator/chatbot/tools/manager.py +100 -0
- neuro_simulator/chatbot/tools/post_chat_message.py +49 -0
- neuro_simulator/config.yaml.example +16 -56
- neuro_simulator/core/application.py +38 -33
- neuro_simulator/core/config.py +14 -12
- neuro_simulator/core/path_manager.py +30 -9
- neuro_simulator/services/stream.py +4 -0
- neuro_simulator/utils/queue.py +13 -1
- {neuro_simulator-0.4.4.dist-info → neuro_simulator-0.5.0.dist-info}/METADATA +1 -1
- {neuro_simulator-0.4.4.dist-info → neuro_simulator-0.5.0.dist-info}/RECORD +24 -12
- neuro_simulator/services/audience.py +0 -102
- {neuro_simulator-0.4.4.dist-info → neuro_simulator-0.5.0.dist-info}/WHEEL +0 -0
- {neuro_simulator-0.4.4.dist-info → neuro_simulator-0.5.0.dist-info}/entry_points.txt +0 -0
- {neuro_simulator-0.4.4.dist-info → neuro_simulator-0.5.0.dist-info}/top_level.txt +0 -0
File without changes: neuro_simulator/chatbot/__init__.py
@@ -0,0 +1,235 @@
```python
# neuro_simulator/chatbot/core.py
"""
Core module for the Neuro Simulator's Chatbot agent.
Implements a dual-LLM "Actor/Thinker" architecture.
"""

import asyncio
import json
import logging
import re
import shutil
import importlib.resources
import os
from pathlib import Path
from typing import Any, Dict, List
from datetime import datetime

from ..core.path_manager import path_manager
from .llm import ChatbotLLMClient
from .memory.manager import ChatbotMemoryManager
from .tools.manager import ChatbotToolManager
from .nickname_gen.generator import NicknameGenerator

logger = logging.getLogger("neuro_chatbot")

class ChatbotAgent:
    """
    Chatbot Agent class implementing the Actor/Thinker model.
    """

    def __init__(self):
        if not path_manager:
            raise RuntimeError("PathManager must be initialized before the Chatbot Agent.")

        self.memory_manager = ChatbotMemoryManager(path_manager.chatbot_memories_dir)
        self.tool_manager = ChatbotToolManager(self.memory_manager)
        self.nickname_generator = NicknameGenerator()

        self.chatbot_llm = ChatbotLLMClient()
        self.memory_llm = ChatbotLLMClient()

        self._initialized = False
        self.turn_counter = 0
        self.reflection_threshold = 5  # Consolidate memory every 5 turns

    async def initialize(self):
        """Initialize the agent, copying default files and loading components."""
        if not self._initialized:
            logger.info("Initializing Chatbot Agent...")
            self._setup_working_directory()
            self.tool_manager.load_tools()
            await self.memory_manager.initialize()
            await self.nickname_generator.initialize()
            self._initialized = True
            logger.info("Chatbot Agent initialized successfully.")

    def _setup_working_directory(self):
        """Ensures the chatbot's working directory is populated with default files."""
        logger.info("Setting up chatbot working directory...")
        try:
            package_source_dir = Path(importlib.resources.files('neuro_simulator'))
        except (ModuleNotFoundError, AttributeError):
            package_source_dir = Path(__file__).parent.parent

        files_to_copy = {
            "chatbot/prompts/chatbot_prompt.txt": path_manager.chatbot_agent_prompt_path,
            "chatbot/prompts/memory_prompt.txt": path_manager.chatbot_memory_agent_prompt_path,
            "chatbot/memory/init_memory.json": path_manager.chatbot_init_memory_path,
            "chatbot/memory/core_memory.json": path_manager.chatbot_core_memory_path,
            "chatbot/memory/temp_memory.json": path_manager.chatbot_temp_memory_path,
            "chatbot/nickname_gen/data/adjectives.txt": path_manager.chatbot_nickname_data_dir / "adjectives.txt",
            "chatbot/nickname_gen/data/nouns.txt": path_manager.chatbot_nickname_data_dir / "nouns.txt",
            "chatbot/nickname_gen/data/special_users.txt": path_manager.chatbot_nickname_data_dir / "special_users.txt",
        }

        for src_rel_path, dest_path in files_to_copy.items():
            if not dest_path.exists():
                source_path = package_source_dir / src_rel_path
                if source_path.exists():
                    try:
                        dest_path.parent.mkdir(parents=True, exist_ok=True)
                        shutil.copy(source_path, dest_path)
                        logger.info(f"Copied default file to {dest_path}")
                    except Exception as e:
                        logger.error(f"Could not copy default file from {source_path}: {e}")

        self._copy_builtin_tools(package_source_dir)

    def _copy_builtin_tools(self, package_source_dir: Path):
        """Copies the packaged built-in tools to the working directory."""
        source_dir = package_source_dir / "chatbot" / "tools"
        dest_dir = path_manager.chatbot_builtin_tools_dir

        if not source_dir.exists():
            logger.warning(f"Default chatbot tools source directory not found at {source_dir}")
            return

        dest_dir.mkdir(parents=True, exist_ok=True)

        for item in os.listdir(source_dir):
            source_item = source_dir / item
            if source_item.is_file() and source_item.name.endswith('.py') and not item.startswith(('__', 'base', 'manager')):
                dest_item = dest_dir / item
                if not dest_item.exists():
                    shutil.copy(source_item, dest_item)
                    logger.info(f"Copied default chatbot tool to {dest_item}")

    async def _append_to_history(self, file_path: Path, data: Dict[str, Any]):
        """Appends a new entry to a JSON Lines history file."""
        data['timestamp'] = datetime.now().isoformat()
        with open(file_path, 'a', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False) + '\n')

    async def _read_history(self, file_path: Path, limit: int) -> List[Dict[str, Any]]:
        """Reads the last N lines from a JSON Lines history file."""
        if not file_path.exists(): return []
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                lines = f.readlines()
            return [json.loads(line) for line in lines[-limit:]]
        except (json.JSONDecodeError, IndexError): return []

    def _format_tool_schemas_for_prompt(self, agent_name: str) -> str:
        """Formats tool schemas for a specific agent (actor or thinker)."""
        schemas = self.tool_manager.get_tool_schemas_for_agent(agent_name)
        if not schemas: return "No tools available."
        lines = ["Available tools:"]
        for i, schema in enumerate(schemas):
            params = ", ".join([f"{p.get('name')}: {p.get('type')}" for p in schema.get("parameters", [])])
            lines.append(f"{i+1}. {schema.get('name')}({params}) - {schema.get('description')}")
        return "\n".join(lines)

    async def _build_chatbot_prompt(self, neuro_speech: str, recent_history: List[Dict[str, str]]) -> str:
        """Builds the prompt for the Chatbot (Actor) LLM."""
        with open(path_manager.chatbot_agent_prompt_path, 'r', encoding='utf-8') as f:
            prompt_template = f.read()

        tool_descriptions = self._format_tool_schemas_for_prompt('chatbot_agent')
        init_memory_text = json.dumps(self.memory_manager.init_memory, indent=2)
        core_memory_text = json.dumps(self.memory_manager.core_memory, indent=2)
        temp_memory_text = json.dumps(self.memory_manager.temp_memory, indent=2)
        recent_history_text = "\n".join([f"{msg.get('role')}: {msg.get('content')}" for msg in recent_history])

        from ..core.config import config_manager
        chats_per_batch = config_manager.settings.chatbot_agent.chats_per_batch

        return prompt_template.format(
            tool_descriptions=tool_descriptions,
            init_memory=init_memory_text,
            core_memory=core_memory_text,
            temp_memory=temp_memory_text,
            recent_history=recent_history_text,
            neuro_speech=neuro_speech,
            chats_per_batch=chats_per_batch
        )

    async def _build_memory_prompt(self, conversation_history: List[Dict[str, str]]) -> str:
        """Builds the prompt for the Memory (Thinker) LLM."""
        with open(path_manager.chatbot_memory_agent_prompt_path, 'r', encoding='utf-8') as f:
            prompt_template = f.read()
        tool_descriptions = self._format_tool_schemas_for_prompt('chatbot_memory_agent')
        history_text = "\n".join([f"{msg.get('role')}: {msg.get('content')}" for msg in conversation_history])
        return prompt_template.format(tool_descriptions=tool_descriptions, conversation_history=history_text)

    def _parse_tool_calls(self, response_text: str) -> List[Dict[str, Any]]:
        """Extracts and parses a JSON array from the LLM's response text."""
        try:
            # Find the start and end of the main JSON array
            start_index = response_text.find('[')
            end_index = response_text.rfind(']')

            if start_index != -1 and end_index != -1 and end_index > start_index:
                json_str = response_text[start_index : end_index + 1]
                return json.loads(json_str)
            else:
                logger.warning(f"Could not find a valid JSON array in response: {response_text}")
                return []
        except Exception as e:
            logger.error(f"Failed to parse tool calls from LLM response: {e}\nRaw text: {response_text}")
            return []

    async def _execute_tool_calls(self, tool_calls: List[Dict[str, Any]], agent_name: str) -> List[Dict[str, str]]:
        generated_messages = []
        for tool_call in tool_calls:
            tool_name = tool_call.get("name")
            params = tool_call.get("params", {})
            result = await self.tool_manager.execute_tool(tool_name, **params)

            if agent_name == 'chatbot_agent' and tool_name == "post_chat_message" and result.get("status") == "success":
                text_to_post = result.get("text_to_post", "")
                if text_to_post:
                    nickname = self.nickname_generator.generate_nickname()
                    message = {"username": nickname, "text": text_to_post}
                    generated_messages.append(message)
                    await self._append_to_history(path_manager.chatbot_agent_history_path, {'role': 'assistant', 'content': f"{nickname}: {text_to_post}"})
        logger.info(f"Returning generated messages: {generated_messages}")
        return generated_messages

    async def generate_chat_messages(self, neuro_speech: str, recent_history: List[Dict[str, str]]) -> List[Dict[str, str]]:
        """The main actor loop to generate chat messages."""
        for entry in recent_history:
            await self._append_to_history(path_manager.chatbot_agent_history_path, entry)

        prompt = await self._build_chatbot_prompt(neuro_speech, recent_history)
        response_text = await self.chatbot_llm.generate(prompt)
        if not response_text: return []

        tool_calls = self._parse_tool_calls(response_text)
        if not tool_calls: return []

        messages = await self._execute_tool_calls(tool_calls, 'chatbot_agent')

        self.turn_counter += 1
        if self.turn_counter >= self.reflection_threshold:
            asyncio.create_task(self._reflect_and_consolidate())

        return messages

    async def _reflect_and_consolidate(self):
        """The main thinker loop to consolidate memories."""
        logger.info("Chatbot is reflecting on recent conversations...")
        self.turn_counter = 0
        history = await self._read_history(path_manager.chatbot_agent_history_path, limit=50)
        if len(history) < self.reflection_threshold:
            return

        prompt = await self._build_memory_prompt(history)
        response_text = await self.memory_llm.generate(prompt)
        if not response_text: return

        tool_calls = self._parse_tool_calls(response_text)
        if not tool_calls: return

        await self._execute_tool_calls(tool_calls, 'chatbot_memory_agent')
        logger.info("Chatbot memory consolidation complete.")
```
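Worth noting: `_parse_tool_calls` simply slices from the first `[` to the last `]` and runs `json.loads`, so the actor LLM is expected to answer with a single JSON array of `{"name": ..., "params": ...}` objects, optionally wrapped in prose. A minimal sketch of that contract (the reply text is hypothetical; the tool names are the built-ins shipped in this release):

```python
import json

# Hypothetical actor reply; surrounding prose is tolerated because the parser
# keeps only the span from the first '[' to the last ']'.
response_text = """Here are the chat messages:
[
  {"name": "post_chat_message", "params": {"text": "lol Neuro is on fire today"}},
  {"name": "add_temp_memory", "params": {"content": "chat enjoyed the singing bit"}}
]"""

start, end = response_text.find('['), response_text.rfind(']')
tool_calls = json.loads(response_text[start:end + 1])
assert tool_calls[0]["name"] == "post_chat_message"  # dispatched by _execute_tool_calls
```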
@@ -0,0 +1,105 @@
```python
# neuro_simulator/chatbot/llm.py
"""
LLM client for the Neuro Simulator's Chatbot agent.
"""

import asyncio
import logging
from typing import Any

# Legacy Google SDK: provides the genai.configure() / GenerativeModel API
# and types.GenerationConfig used below.
import google.generativeai as genai
from google.generativeai import types
from openai import AsyncOpenAI

from ..core.config import config_manager

logger = logging.getLogger(__name__.replace("neuro_simulator", "chatbot", 1))

class ChatbotLLMClient:
    """A completely independent LLM client for the chatbot agent, with lazy initialization."""

    def __init__(self):
        self.client: Any = None
        self.model_name: str | None = None
        self._generate_func = None
        self._initialized = False

    async def _ensure_initialized(self):
        """Initializes the client on first use."""
        if self._initialized:
            return

        logger.info("First use of Chatbot's LLMClient, performing initialization...")
        settings = config_manager.settings
        # Use the new chatbot_agent config block
        provider = settings.chatbot_agent.agent_provider.lower()

        if provider == "gemini":
            api_key = settings.api_keys.gemini_api_key
            if not api_key:
                raise ValueError("GEMINI_API_KEY is not set in configuration for the chatbot agent.")

            genai.configure(api_key=api_key)
            self.model_name = settings.chatbot_agent.agent_model
            self._generate_func = self._generate_gemini

        elif provider == "openai":
            api_key = settings.api_keys.openai_api_key
            if not api_key:
                raise ValueError("OPENAI_API_KEY is not set in configuration for the chatbot agent.")

            self.model_name = settings.chatbot_agent.agent_model
            self.client = AsyncOpenAI(
                api_key=api_key,
                base_url=settings.api_keys.openai_api_base_url
            )
            self._generate_func = self._generate_openai
        else:
            raise ValueError(f"Unsupported agent provider in chatbot_agent config: {provider}")

        self._initialized = True
        logger.info(f"Chatbot LLM client initialized. Provider: {provider.upper()}, Model: {self.model_name}")

    async def _generate_gemini(self, prompt: str, max_tokens: int) -> str:
        """Generates text using the Gemini model."""
        generation_config = types.GenerationConfig(
            max_output_tokens=max_tokens,
        )
        model = genai.GenerativeModel(self.model_name)
        try:
            response = await model.generate_content_async(
                contents=prompt,
                generation_config=generation_config
            )
            return response.text
        except Exception as e:
            logger.error(f"Error in chatbot _generate_gemini: {e}", exc_info=True)
            return ""

    async def _generate_openai(self, prompt: str, max_tokens: int) -> str:
        """Generates text using the OpenAI model."""
        try:
            response = await self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
            )
            if response.choices and response.choices[0].message and response.choices[0].message.content:
                return response.choices[0].message.content.strip()
            return ""
        except Exception as e:
            logger.error(f"Error in chatbot _generate_openai: {e}", exc_info=True)
            return ""

    async def generate(self, prompt: str, max_tokens: int = 1000) -> str:
        """Generate text using the configured LLM, ensuring client is initialized."""
        await self._ensure_initialized()

        if not self._generate_func:
            raise RuntimeError("Chatbot LLM Client could not be initialized.")
        try:
            result = await self._generate_func(prompt, max_tokens)
            return result if result is not None else ""
        except Exception as e:
            logger.error(f"Error generating text with Chatbot LLM: {e}", exc_info=True)
            return "My brain is not working, tell Vedal to check the logs."
```
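Because initialization is lazy, constructing the client is free; the first `generate()` call reads the `chatbot_agent` config block and wires up the chosen provider. A usage sketch, assuming `config_manager` has already loaded a valid config:

```python
import asyncio

from neuro_simulator.chatbot.llm import ChatbotLLMClient

async def main() -> None:
    client = ChatbotLLMClient()  # no config reads or network I/O yet
    # First call runs _ensure_initialized(), then dispatches to the provider's
    # _generate_* coroutine. Generation errors surface as "" or the fallback
    # string; a missing API key still raises ValueError.
    reply = await client.generate("Greet the chat in one short sentence.", max_tokens=64)
    print(reply)

asyncio.run(main())
```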
File without changes: neuro_simulator/chatbot/memory/__init__.py
@@ -0,0 +1,90 @@
```python
# neuro_simulator/chatbot/memory/manager.py
"""
Manages the chatbot agent's shared memory state (init, core, temp).
"""

import json
import logging
import random
import string
from typing import Any, Dict, List, Optional
from datetime import datetime
from pathlib import Path

logger = logging.getLogger(__name__.replace("neuro_simulator", "chatbot", 1))

def generate_id(length=6) -> str:
    """Generate a random ID string."""
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

class ChatbotMemoryManager:
    """Manages the three types of shared memory for the chatbot agent."""

    def __init__(self, memory_root_path: Path):
        """Initializes the MemoryManager with a specific root path for its memory files."""
        if not memory_root_path:
            raise ValueError("memory_root_path must be provided.")

        self.init_memory_file = memory_root_path / "init_memory.json"
        self.core_memory_file = memory_root_path / "core_memory.json"
        self.temp_memory_file = memory_root_path / "temp_memory.json"

        self.init_memory: Dict[str, Any] = {}
        self.core_memory: Dict[str, Any] = {}
        self.temp_memory: List[Dict[str, Any]] = []

    async def initialize(self):
        """Load all memory types from files. Assumes files have been created by the setup mechanism."""
        # Load init memory
        if self.init_memory_file.exists():
            with open(self.init_memory_file, 'r', encoding='utf-8') as f:
                self.init_memory = json.load(f)
        else:
            logger.error(f"Init memory file not found at {self.init_memory_file}, proceeding with empty memory.")
            self.init_memory = {}

        # Load core memory
        if self.core_memory_file.exists():
            with open(self.core_memory_file, 'r', encoding='utf-8') as f:
                self.core_memory = json.load(f)
        else:
            logger.error(f"Core memory file not found at {self.core_memory_file}, proceeding with empty memory.")
            self.core_memory = {"blocks": {}}

        # Load temp memory
        if self.temp_memory_file.exists():
            with open(self.temp_memory_file, 'r', encoding='utf-8') as f:
                self.temp_memory = json.load(f)
        else:
            # This is less critical, can start empty
            self.temp_memory = []
            await self._save_temp_memory()

        logger.info(f"Chatbot MemoryManager initialized from {self.init_memory_file.parent}.")

    async def _save_init_memory(self):
        with open(self.init_memory_file, 'w', encoding='utf-8') as f:
            json.dump(self.init_memory, f, ensure_ascii=False, indent=2)

    async def _save_core_memory(self):
        with open(self.core_memory_file, 'w', encoding='utf-8') as f:
            json.dump(self.core_memory, f, ensure_ascii=False, indent=2)

    async def _save_temp_memory(self):
        with open(self.temp_memory_file, 'w', encoding='utf-8') as f:
            json.dump(self.temp_memory, f, ensure_ascii=False, indent=2)

    async def reset_temp_memory(self):
        """Reset temp memory to a default empty state."""
        self.temp_memory = []
        await self._save_temp_memory()
        logger.info("Chatbot temp memory has been reset.")

    async def add_temp_memory(self, content: str, role: str = "system"):
        self.temp_memory.append({"id": generate_id(), "content": content, "role": role, "timestamp": datetime.now().isoformat()})
        if len(self.temp_memory) > 20:
            self.temp_memory = self.temp_memory[-20:]
        await self._save_temp_memory()

    async def get_core_memory_blocks(self) -> Dict[str, Any]:
        return self.core_memory.get("blocks", {})
```
File without changes: neuro_simulator/chatbot/nickname_gen/__init__.py
@@ -0,0 +1,144 @@
```python
# neuro_simulator/chatbot/nickname_gen/generator.py
"""
Nickname generator for the chatbot agent.
Implements a dual-pool system (base and dynamic) with multiple generation strategies.
"""

import logging
import random
from pathlib import Path
from typing import List, Dict, Callable

from ..llm import ChatbotLLMClient
from ...core.config import config_manager
from ...core.path_manager import path_manager

logger = logging.getLogger(__name__.replace("neuro_simulator", "chatbot", 1))

class NicknameGenerator:
    """Generates diverse nicknames using a multi-strategy, dual-pool system."""

    def __init__(self):
        if not path_manager:
            raise RuntimeError("PathManager must be initialized before NicknameGenerator.")

        self.base_adjectives: List[str] = []
        self.base_nouns: List[str] = []
        self.special_users: List[str] = []

        self.dynamic_adjectives: List[str] = []
        self.dynamic_nouns: List[str] = []

        self.llm_client = ChatbotLLMClient()

    def _load_word_pool(self, filename: str) -> List[str]:
        """Loads a word pool from the nickname_gen/data directory."""
        file_path = path_manager.chatbot_nickname_data_dir / filename
        if not file_path.exists():
            logger.warning(f"Nickname pool file not found: {file_path}. The pool will be empty.")
            return []
        with open(file_path, 'r', encoding='utf-8') as f:
            return [line.strip() for line in f if line.strip()]

    async def initialize(self):
        """Loads base pools and attempts to generate dynamic pools."""
        logger.info("Initializing NicknameGenerator...")
        self.base_adjectives = self._load_word_pool("adjectives.txt")
        self.base_nouns = self._load_word_pool("nouns.txt")
        self.special_users = self._load_word_pool("special_users.txt")

        if not self.base_adjectives or not self.base_nouns:
            logger.warning("Base adjective or noun pools are empty. Nickname generation quality will be affected.")

        if config_manager.settings.chatbot_agent.nickname_generation.enable_dynamic_pool:
            await self._populate_dynamic_pools()

        logger.info("NicknameGenerator initialized.")

    async def _populate_dynamic_pools(self):
        """Uses an LLM to generate and populate the dynamic word pools."""
        logger.info("Attempting to populate dynamic nickname pools using LLM...")
        pool_size = config_manager.settings.chatbot_agent.nickname_generation.dynamic_pool_size
        try:
            adj_prompt = f"Generate a list of {pool_size} diverse, cool-sounding English adjectives for online usernames. Output only the words, one per line."
            noun_prompt = f"Generate a list of {pool_size} diverse, cool-sounding English nouns for online usernames. Output only the words, one per line."

            adj_list_str = await self.llm_client.generate(adj_prompt, max_tokens=pool_size * 10)
            noun_list_str = await self.llm_client.generate(noun_prompt, max_tokens=pool_size * 10)

            self.dynamic_adjectives = [line.strip() for line in adj_list_str.split('\n') if line.strip()]
            self.dynamic_nouns = [line.strip() for line in noun_list_str.split('\n') if line.strip()]

            if self.dynamic_adjectives and self.dynamic_nouns:
                logger.info(f"Successfully populated dynamic pools with {len(self.dynamic_adjectives)} adjectives and {len(self.dynamic_nouns)} nouns.")
            else:
                logger.warning("LLM generated empty lists for dynamic pools.")

        except Exception as e:
            logger.error(f"Failed to generate dynamic nickname pool: {e}. Falling back to base pool only.", exc_info=True)
            self.dynamic_adjectives = []
            self.dynamic_nouns = []

    def _get_combined_pools(self) -> tuple[List[str], List[str]]:
        """Returns the combination of base and dynamic pools."""
        adjectives = self.base_adjectives + self.dynamic_adjectives
        nouns = self.base_nouns + self.dynamic_nouns
        return adjectives, nouns

    def _generate_from_word_pools(self) -> str:
        adjectives, nouns = self._get_combined_pools()
        if not adjectives or not nouns:
            return self._generate_random_numeric()  # Fallback

        noun = random.choice(nouns)
        # 50% chance to add an adjective
        if random.random() < 0.5:
            adjective = random.choice(adjectives)
            # Formatting variations
            format_choice = random.random()
            if format_choice < 0.4:
                return f"{adjective.capitalize()}{noun.capitalize()}"
            elif format_choice < 0.7:
                return f"{adjective.lower()}_{noun.lower()}"
            else:
                return f"{adjective.lower()}{noun.lower()}"
        else:
            # Add a number suffix 30% of the time
            if random.random() < 0.3:
                return f"{noun.capitalize()}{random.randint(1, 999)}"
            return noun.capitalize()

    def _generate_from_special_pool(self) -> str:
        if not self.special_users:
            return self._generate_from_word_pools()  # Fallback
        return random.choice(self.special_users)

    def _generate_random_numeric(self) -> str:
        return f"user{random.randint(10000, 99999)}"

    def generate_nickname(self) -> str:
        """Generates a single nickname based on weighted strategies."""
        strategies: Dict[Callable[[], str], int] = {
            self._generate_from_word_pools: 70,
            self._generate_from_special_pool: 15,
            self._generate_random_numeric: 15,
        }

        # Filter out strategies that can't be run (e.g., empty special pool)
        if not self.special_users:
            strategies.pop(self._generate_from_special_pool, None)
            # Redistribute weight
            if strategies:
                total_weight = sum(strategies.values())
                strategies = {k: v / total_weight * 100 for k, v in strategies.items()}

        if not any(self._get_combined_pools()):
            strategies = {self._generate_random_numeric: 100}

        chosen_strategy = random.choices(
            population=list(strategies.keys()),
            weights=list(strategies.values()),
            k=1
        )[0]

        return chosen_strategy()
```
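The dispatch at the end is plain `random.choices` over bound methods; note that `random.choices` accepts arbitrary relative weights, so the normalization to 100 after dropping the special pool is cosmetic. A self-contained illustration of the same pattern (the three stand-in functions and their outputs are hypothetical):

```python
import random
from typing import Callable, Dict

def from_word_pools() -> str:    # stand-in for _generate_from_word_pools
    return "SneakyTurtle42"

def from_special_pool() -> str:  # stand-in for _generate_from_special_pool
    return "vedal987"

def random_numeric() -> str:     # stand-in for _generate_random_numeric
    return f"user{random.randint(10000, 99999)}"

# Weights are relative, so 70/15/15 behaves identically to 0.7/0.15/0.15.
strategies: Dict[Callable[[], str], int] = {
    from_word_pools: 70,
    from_special_pool: 15,
    random_numeric: 15,
}
picked = random.choices(list(strategies.keys()), weights=list(strategies.values()), k=1)[0]
print(picked())
```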
File without changes: neuro_simulator/chatbot/prompts/__init__.py

File without changes: neuro_simulator/chatbot/tools/__init__.py
@@ -0,0 +1,51 @@
```python
# neuro_simulator/chatbot/tools/add_temp_memory.py
"""The Add Temp Memory tool for the chatbot agent."""

from typing import Any, Dict, List

from neuro_simulator.chatbot.tools.base import BaseChatbotTool
from neuro_simulator.chatbot.memory.manager import ChatbotMemoryManager

class AddTempMemoryTool(BaseChatbotTool):
    """Tool to add an entry to the chatbot's temporary memory."""

    def __init__(self, memory_manager: ChatbotMemoryManager):
        self.memory_manager = memory_manager

    @property
    def name(self) -> str:
        return "add_temp_memory"

    @property
    def description(self) -> str:
        return "Adds an entry to the temporary memory. Use for short-term observations, recent facts, or topics to bring up soon."

    @property
    def parameters(self) -> List[Dict[str, Any]]:
        return [
            {
                "name": "content",
                "type": "string",
                "description": "The content of the memory entry.",
                "required": True,
            },
            {
                "name": "role",
                "type": "string",
                "description": "The role associated with the memory (e.g., 'system', 'user'). Defaults to 'system'.",
                "required": False,
            }
        ]

    async def execute(self, **kwargs: Any) -> Dict[str, Any]:
        content = kwargs.get("content")
        if not isinstance(content, str) or not content:
            raise ValueError("The 'content' parameter must be a non-empty string.")

        role = kwargs.get("role", "system")
        if not isinstance(role, str):
            raise ValueError("The 'role' parameter must be a string.")

        await self.memory_manager.add_temp_memory(content=content, role=role)

        return {"status": "success", "message": f"Added entry to temporary memory with role '{role}'."}
```