claude-mpm 4.0.20-py3-none-any.whl → 4.0.22-py3-none-any.whl
This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- claude_mpm/BUILD_NUMBER +1 -1
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/INSTRUCTIONS.md +74 -0
- claude_mpm/agents/WORKFLOW.md +308 -4
- claude_mpm/agents/agents_metadata.py +52 -0
- claude_mpm/agents/base_agent_loader.py +75 -19
- claude_mpm/agents/templates/__init__.py +4 -0
- claude_mpm/agents/templates/api_qa.json +206 -0
- claude_mpm/agents/templates/research.json +24 -16
- claude_mpm/agents/templates/ticketing.json +18 -5
- claude_mpm/agents/templates/vercel_ops_agent.json +281 -0
- claude_mpm/agents/templates/vercel_ops_instructions.md +582 -0
- claude_mpm/cli/commands/mcp_command_router.py +87 -1
- claude_mpm/cli/commands/mcp_install_commands.py +207 -26
- claude_mpm/cli/parsers/mcp_parser.py +23 -0
- claude_mpm/constants.py +1 -0
- claude_mpm/core/base_service.py +7 -1
- claude_mpm/core/config.py +64 -39
- claude_mpm/core/framework_loader.py +68 -28
- claude_mpm/core/interactive_session.py +28 -17
- claude_mpm/scripts/socketio_daemon.py +67 -7
- claude_mpm/scripts/socketio_daemon_hardened.py +897 -0
- claude_mpm/services/agents/deployment/agent_deployment.py +65 -3
- claude_mpm/services/agents/deployment/async_agent_deployment.py +65 -1
- claude_mpm/services/agents/memory/agent_memory_manager.py +42 -203
- claude_mpm/services/memory_hook_service.py +62 -4
- claude_mpm/services/runner_configuration_service.py +5 -9
- claude_mpm/services/socketio/server/broadcaster.py +32 -1
- claude_mpm/services/socketio/server/core.py +4 -0
- claude_mpm/services/socketio/server/main.py +23 -4
- {claude_mpm-4.0.20.dist-info → claude_mpm-4.0.22.dist-info}/METADATA +1 -1
- {claude_mpm-4.0.20.dist-info → claude_mpm-4.0.22.dist-info}/RECORD +36 -32
- {claude_mpm-4.0.20.dist-info → claude_mpm-4.0.22.dist-info}/WHEEL +0 -0
- {claude_mpm-4.0.20.dist-info → claude_mpm-4.0.22.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.0.20.dist-info → claude_mpm-4.0.22.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.0.20.dist-info → claude_mpm-4.0.22.dist-info}/top_level.txt +0 -0
claude_mpm/services/agents/deployment/agent_deployment.py

@@ -166,12 +166,12 @@ class AgentDeploymentService(AgentDeploymentInterface):
         # Initialize multi-source deployment service for version comparison
         self.multi_source_service = MultiSourceAgentDeploymentService()

-        # Find base agent file
+        # Find base agent file with priority-based search
         if base_agent_path:
             self.base_agent_path = Path(base_agent_path)
         else:
-            #
-            self.base_agent_path =
+            # Priority-based search for base_agent.json
+            self.base_agent_path = self._find_base_agent_file()

         # Initialize configuration manager (after base_agent_path is set)
         self.configuration_manager = AgentConfigurationManager(self.base_agent_path)

@@ -181,6 +181,68 @@ class AgentDeploymentService(AgentDeploymentInterface):

         self.logger.info(f"Templates directory: {self.templates_dir}")
         self.logger.info(f"Base agent path: {self.base_agent_path}")
+
+    def _find_base_agent_file(self) -> Path:
+        """Find base agent file with priority-based search.
+
+        Priority order:
+        1. Environment variable override (CLAUDE_MPM_BASE_AGENT_PATH)
+        2. Current working directory (for local development)
+        3. Known development locations
+        4. User override location (~/.claude/agents/)
+        5. Framework agents directory (from paths)
+        """
+        # Priority 0: Check environment variable override
+        env_path = os.environ.get("CLAUDE_MPM_BASE_AGENT_PATH")
+        if env_path:
+            env_base_agent = Path(env_path)
+            if env_base_agent.exists():
+                self.logger.info(f"Using environment variable base_agent: {env_base_agent}")
+                return env_base_agent
+            else:
+                self.logger.warning(f"CLAUDE_MPM_BASE_AGENT_PATH set but file doesn't exist: {env_base_agent}")
+
+        # Priority 1: Check current working directory for local development
+        cwd = Path.cwd()
+        cwd_base_agent = cwd / "src" / "claude_mpm" / "agents" / "base_agent.json"
+        if cwd_base_agent.exists():
+            self.logger.info(f"Using local development base_agent from cwd: {cwd_base_agent}")
+            return cwd_base_agent
+
+        # Priority 2: Check known development locations
+        known_dev_paths = [
+            Path("/Users/masa/Projects/claude-mpm/src/claude_mpm/agents/base_agent.json"),
+            Path.home() / "Projects" / "claude-mpm" / "src" / "claude_mpm" / "agents" / "base_agent.json",
+            Path.home() / "projects" / "claude-mpm" / "src" / "claude_mpm" / "agents" / "base_agent.json",
+        ]
+
+        for dev_path in known_dev_paths:
+            if dev_path.exists():
+                self.logger.info(f"Using development base_agent: {dev_path}")
+                return dev_path
+
+        # Priority 3: Check user override location
+        user_base_agent = Path.home() / ".claude" / "agents" / "base_agent.json"
+        if user_base_agent.exists():
+            self.logger.info(f"Using user override base_agent: {user_base_agent}")
+            return user_base_agent
+
+        # Priority 4: Use framework agents directory (fallback)
+        framework_base_agent = paths.agents_dir / "base_agent.json"
+        if framework_base_agent.exists():
+            self.logger.info(f"Using framework base_agent: {framework_base_agent}")
+            return framework_base_agent
+
+        # If still not found, log all searched locations and raise error
+        self.logger.error("Base agent file not found in any location:")
+        self.logger.error(f"  1. CWD: {cwd_base_agent}")
+        self.logger.error(f"  2. Dev paths: {known_dev_paths}")
+        self.logger.error(f"  3. User: {user_base_agent}")
+        self.logger.error(f"  4. Framework: {framework_base_agent}")
+
+        # Final fallback to framework path even if it doesn't exist
+        # (will fail later with better error message)
+        return framework_base_agent

     def deploy_agents(
         self,
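For reference, the lookup order above reduces to a simple cascade. The following standalone sketch restates it for illustration only; it is not the shipped implementation, and the packaged fallback directory is passed in explicitly here because the real code resolves it through the framework's `paths.agents_dir`.

```python
import os
from pathlib import Path


def find_base_agent(framework_agents_dir: Path) -> Path:
    """Illustrative restatement of the 4.0.22 lookup order (not the shipped code)."""
    # 0. Explicit override via environment variable
    env_path = os.environ.get("CLAUDE_MPM_BASE_AGENT_PATH")
    if env_path and Path(env_path).exists():
        return Path(env_path)

    # 1. Local checkout in the current working directory
    cwd_candidate = Path.cwd() / "src" / "claude_mpm" / "agents" / "base_agent.json"
    if cwd_candidate.exists():
        return cwd_candidate

    # 2. Known development checkouts under the home directory
    for dev_candidate in (
        Path.home() / "Projects" / "claude-mpm" / "src" / "claude_mpm" / "agents" / "base_agent.json",
        Path.home() / "projects" / "claude-mpm" / "src" / "claude_mpm" / "agents" / "base_agent.json",
    ):
        if dev_candidate.exists():
            return dev_candidate

    # 3. Per-user override
    user_candidate = Path.home() / ".claude" / "agents" / "base_agent.json"
    if user_candidate.exists():
        return user_candidate

    # 4. Packaged framework copy, returned even if missing (the caller reports the error)
    return framework_agents_dir / "base_agent.json"
```

Passing `base_agent_path` to the constructor still bypasses the search entirely, as the first hunk shows.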
claude_mpm/services/agents/deployment/async_agent_deployment.py

@@ -83,7 +83,8 @@ class AsyncAgentDeploymentService:
         if base_agent_path:
             self.base_agent_path = Path(base_agent_path)
         else:
-
+            # Use priority-based search for base_agent.json
+            self.base_agent_path = self._find_base_agent_file()

         # Thread pool for CPU-bound JSON parsing
         self.executor = ThreadPoolExecutor(max_workers=4)

@@ -94,6 +95,69 @@ class AsyncAgentDeploymentService:
             "parallel_files_processed": 0,
             "time_saved_ms": 0.0,
         }
+
+        self.logger.info(f"Base agent path: {self.base_agent_path}")
+
+    def _find_base_agent_file(self) -> Path:
+        """Find base agent file with priority-based search.
+
+        Priority order:
+        1. Environment variable override (CLAUDE_MPM_BASE_AGENT_PATH)
+        2. Current working directory (for local development)
+        3. Known development locations
+        4. User override location (~/.claude/agents/)
+        5. Framework agents directory (from paths)
+        """
+        # Priority 0: Check environment variable override
+        env_path = os.environ.get("CLAUDE_MPM_BASE_AGENT_PATH")
+        if env_path:
+            env_base_agent = Path(env_path)
+            if env_base_agent.exists():
+                self.logger.info(f"Using environment variable base_agent: {env_base_agent}")
+                return env_base_agent
+            else:
+                self.logger.warning(f"CLAUDE_MPM_BASE_AGENT_PATH set but file doesn't exist: {env_base_agent}")
+
+        # Priority 1: Check current working directory for local development
+        cwd = Path.cwd()
+        cwd_base_agent = cwd / "src" / "claude_mpm" / "agents" / "base_agent.json"
+        if cwd_base_agent.exists():
+            self.logger.info(f"Using local development base_agent from cwd: {cwd_base_agent}")
+            return cwd_base_agent
+
+        # Priority 2: Check known development locations
+        known_dev_paths = [
+            Path("/Users/masa/Projects/claude-mpm/src/claude_mpm/agents/base_agent.json"),
+            Path.home() / "Projects" / "claude-mpm" / "src" / "claude_mpm" / "agents" / "base_agent.json",
+            Path.home() / "projects" / "claude-mpm" / "src" / "claude_mpm" / "agents" / "base_agent.json",
+        ]
+
+        for dev_path in known_dev_paths:
+            if dev_path.exists():
+                self.logger.info(f"Using development base_agent: {dev_path}")
+                return dev_path
+
+        # Priority 3: Check user override location
+        user_base_agent = Path.home() / ".claude" / "agents" / "base_agent.json"
+        if user_base_agent.exists():
+            self.logger.info(f"Using user override base_agent: {user_base_agent}")
+            return user_base_agent
+
+        # Priority 4: Use framework agents directory (fallback)
+        framework_base_agent = paths.agents_dir / "base_agent.json"
+        if framework_base_agent.exists():
+            self.logger.info(f"Using framework base_agent: {framework_base_agent}")
+            return framework_base_agent
+
+        # If still not found, log all searched locations
+        self.logger.warning("Base agent file not found in any location:")
+        self.logger.warning(f"  1. CWD: {cwd_base_agent}")
+        self.logger.warning(f"  2. Dev paths: {known_dev_paths}")
+        self.logger.warning(f"  3. User: {user_base_agent}")
+        self.logger.warning(f"  4. Framework: {framework_base_agent}")
+
+        # Final fallback to framework path even if it doesn't exist
+        return framework_base_agent

     async def discover_agents_async(
         self, directories: List[Path]
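Because both services consult `CLAUDE_MPM_BASE_AGENT_PATH` before any filesystem location, the override is easy to exercise in a test. A hedged pytest sketch follows; the module path comes from the file list above, but the zero-argument constructor is an assumption made for illustration.

```python
def test_env_override_wins(tmp_path, monkeypatch):
    # Create a stand-in base agent and point the override at it.
    base = tmp_path / "base_agent.json"
    base.write_text("{}", encoding="utf-8")
    monkeypatch.setenv("CLAUDE_MPM_BASE_AGENT_PATH", str(base))

    from claude_mpm.services.agents.deployment.async_agent_deployment import (
        AsyncAgentDeploymentService,
    )

    # Assumption: the service can be constructed with no arguments.
    service = AsyncAgentDeploymentService()
    assert service.base_agent_path == base
```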
claude_mpm/services/agents/memory/agent_memory_manager.py

@@ -83,16 +83,14 @@ class AgentMemoryManager(MemoryServiceInterface):
         # Use current working directory by default, not project root
         self.working_directory = working_directory or Path(os.getcwd())

-        #
-        self.user_memories_dir = Path.home() / ".claude-mpm" / "memories"
+        # Use only project memory directory
         self.project_memories_dir = self.working_directory / ".claude-mpm" / "memories"

-        # Primary memories_dir points to project
+        # Primary memories_dir points to project
         self.memories_dir = self.project_memories_dir

-        # Ensure
+        # Ensure project directory exists
         self._ensure_memories_directory()
-        self._ensure_user_memories_directory()

         # Initialize memory limits from configuration
         self._init_memory_limits()

@@ -222,6 +220,8 @@ class AgentMemoryManager(MemoryServiceInterface):
             try:
                 content = old_file_agent.read_text(encoding="utf-8")
                 new_file.write_text(content, encoding="utf-8")
+
+                # Delete old file for all agents
                 old_file_agent.unlink()
                 self.logger.info(f"Migrated memory file from {old_file_agent.name} to {new_file.name}")
             except Exception as e:

@@ -232,6 +232,8 @@ class AgentMemoryManager(MemoryServiceInterface):
             try:
                 content = old_file_simple.read_text(encoding="utf-8")
                 new_file.write_text(content, encoding="utf-8")
+
+                # Delete old file for all agents
                 old_file_simple.unlink()
                 self.logger.info(f"Migrated memory file from {old_file_simple.name} to {new_file.name}")
             except Exception as e:

@@ -241,64 +243,34 @@ class AgentMemoryManager(MemoryServiceInterface):
         return new_file

     def load_agent_memory(self, agent_id: str) -> str:
-        """Load agent memory file content from
+        """Load agent memory file content from project directory.

         WHY: Agents need to read their accumulated knowledge before starting tasks
-        to apply learned patterns and avoid repeated mistakes.
-
-
-        Loading order:
-        1. User-level memory (~/.claude-mpm/memories/{agent_id}_memories.md)
-        2. Project-level memory (./.claude-mpm/memories/{agent_id}_memories.md)
-        3. Project memory overrides/extends user memory
+        to apply learned patterns and avoid repeated mistakes. All memories are
+        now stored at the project level for consistency.

         Args:
-            agent_id: The agent identifier (e.g., 'research', 'engineer')
+            agent_id: The agent identifier (e.g., 'PM', 'research', 'engineer')

         Returns:
-            str: The
+            str: The memory file content, creating default if doesn't exist
         """
-        #
-        user_memory_file = self._get_memory_file_with_migration(self.user_memories_dir, agent_id)
+        # All agents use project directory
         project_memory_file = self._get_memory_file_with_migration(self.project_memories_dir, agent_id)

-        user_memory = None
-        project_memory = None
-
-        # Load user-level memory if exists
-        if user_memory_file.exists():
-            try:
-                user_memory = user_memory_file.read_text(encoding="utf-8")
-                user_memory = self.content_manager.validate_and_repair(user_memory, agent_id)
-                self.logger.debug(f"Loaded user-level memory for {agent_id}")
-            except Exception as e:
-                self.logger.error(f"Error reading user memory file for {agent_id}: {e}")
-
         # Load project-level memory if exists
         if project_memory_file.exists():
             try:
                 project_memory = project_memory_file.read_text(encoding="utf-8")
                 project_memory = self.content_manager.validate_and_repair(project_memory, agent_id)
                 self.logger.debug(f"Loaded project-level memory for {agent_id}")
+                return project_memory
             except Exception as e:
                 self.logger.error(f"Error reading project memory file for {agent_id}: {e}")

-        #
-
-
-            aggregated = self._aggregate_agent_memories(user_memory, project_memory, agent_id)
-            self.logger.info(f"Aggregated user and project memories for {agent_id}")
-            return aggregated
-        elif project_memory:
-            # Only project memory exists
-            return project_memory
-        elif user_memory:
-            # Only user memory exists
-            return user_memory
-        else:
-            # Neither exists - create default in project directory
-            self.logger.info(f"Creating default memory for agent: {agent_id}")
-            return self._create_default_memory(agent_id)
+        # Memory doesn't exist - create default in project directory
+        self.logger.info(f"Creating default memory for agent: {agent_id}")
+        return self._create_default_memory(agent_id)

     def update_agent_memory(self, agent_id: str, section: str, new_item: str) -> bool:
         """Add new learning item to specified section.
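With the user-level directory gone, every agent's memory lives under the project working directory. A minimal sketch of where `load_agent_memory` now looks, using the paths from the hunks above (the agent id is illustrative):

```python
from pathlib import Path

agent_id = "research"  # illustrative
memory_file = Path.cwd() / ".claude-mpm" / "memories" / f"{agent_id}_memories.md"

if memory_file.exists():
    print(memory_file.read_text(encoding="utf-8"))
else:
    # load_agent_memory creates a default file at this location when none exists.
    print(f"No memory yet for {agent_id}")
```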
claude_mpm/services/agents/memory/agent_memory_manager.py (continued)

@@ -389,9 +361,10 @@ class AgentMemoryManager(MemoryServiceInterface):
         # Delegate to template generator
         template = self.template_generator.create_default_memory(agent_id, limits)

-        # Save default file
+        # Save default file to project directory
         try:
-
+            target_dir = self.memories_dir
+            memory_file = target_dir / f"{agent_id}_memories.md"
             memory_file.write_text(template, encoding="utf-8")
             self.logger.info(f"Created project-specific memory file for {agent_id}")

@@ -400,32 +373,30 @@ class AgentMemoryManager(MemoryServiceInterface):

         return template

-    def _save_memory_file(self, agent_id: str, content: str
+    def _save_memory_file(self, agent_id: str, content: str) -> bool:
         """Save memory content to file.

         WHY: Memory updates need to be persisted atomically to prevent corruption
-        and ensure learnings are preserved across agent invocations.
-        saves to project directory, but can optionally save to user directory.
+        and ensure learnings are preserved across agent invocations.

         Args:
             agent_id: Agent identifier
             content: Content to save
-            save_to_user: If True, saves to user directory instead of project

         Returns:
             bool: True if save succeeded
         """
         try:
-            #
-            target_dir = self.
-            memory_file = target_dir / f"{agent_id}_memories.md"
+            # All agents save to project directory
+            target_dir = self.project_memories_dir

             # Ensure directory exists
             target_dir.mkdir(parents=True, exist_ok=True)

+            memory_file = target_dir / f"{agent_id}_memories.md"
             memory_file.write_text(content, encoding="utf-8")
-
-            self.logger.
+
+            self.logger.info(f"Saved {agent_id} memory to project directory: {memory_file}")
             return True
         except Exception as e:
             self.logger.error(f"Error saving memory for {agent_id}: {e}")

@@ -535,6 +506,10 @@ class AgentMemoryManager(MemoryServiceInterface):
         import json
         import re

+        # Log that we're processing memory for this agent
+        is_pm = agent_id.upper() == "PM"
+        self.logger.debug(f"Extracting memory for {agent_id} (is_pm={is_pm})")
+
         # Look for JSON block in the response
         # Pattern matches ```json ... ``` blocks
         json_pattern = r'```json\s*(.*?)\s*```'
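The extraction step scans agent responses for fenced JSON blocks with the pattern shown above. A small standalone sketch of that scan; the `re.DOTALL` flag and the payload contents are assumptions, since the diff does not show how the matched JSON is interpreted.

```python
import json
import re

JSON_BLOCK = r"```json\s*(.*?)\s*```"  # pattern from the hunk above
fence = "`" * 3  # build the fence programmatically so this example nests cleanly

response = "\n".join([
    "Some agent output...",
    fence + "json",
    '{"example": ["illustrative payload only"]}',
    fence,
])

for block in re.findall(JSON_BLOCK, response, re.DOTALL):
    try:
        payload = json.loads(block)
    except json.JSONDecodeError:
        continue  # not valid JSON, keep scanning
    print(payload)
```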
claude_mpm/services/agents/memory/agent_memory_manager.py (continued)

@@ -570,15 +545,20 @@ class AgentMemoryManager(MemoryServiceInterface):

                     # Only proceed if we have valid items
                     if valid_items:
+                        self.logger.info(f"Found {len(valid_items)} memory items for {agent_id}: {valid_items[:2]}...")
                         success = self._add_learnings_to_memory(agent_id, valid_items)
                         if success:
-                            self.logger.info(f"
+                            self.logger.info(f"Successfully saved {len(valid_items)} memories for {agent_id} to project directory")
                             return True
+                        else:
+                            self.logger.error(f"Failed to save memories for {agent_id}")

-                except json.JSONDecodeError:
+                except json.JSONDecodeError as je:
                     # Not valid JSON, continue to next match
+                    self.logger.debug(f"JSON decode error for {agent_id}: {je}")
                     continue

+            self.logger.debug(f"No memory items found in response for {agent_id}")
             return False

         except Exception as e:

@@ -590,7 +570,7 @@ class AgentMemoryManager(MemoryServiceInterface):

         WHY: Instead of replacing all memory, we want to intelligently merge new
         learnings with existing knowledge, avoiding duplicates and maintaining
-        the most relevant information.
+        the most relevant information. PM memories are always saved to user dir.

         Args:
             agent_id: The agent identifier

@@ -635,8 +615,9 @@ class AgentMemoryManager(MemoryServiceInterface):
                     if not learning.startswith("-"):
                         learning = f"- {learning}"
                     sections[section].append(learning)
+                    self.logger.info(f"Added new memory for {agent_id}: {learning[:50]}...")
                 else:
-                    self.logger.debug(f"Skipping duplicate memory: {learning}")
+                    self.logger.debug(f"Skipping duplicate memory for {agent_id}: {learning}")

             # Rebuild memory content
             new_content = self._build_memory_content(agent_id, sections)

@@ -647,6 +628,7 @@ class AgentMemoryManager(MemoryServiceInterface):
                 self.logger.debug(f"Memory for {agent_id} exceeds limits, truncating")
                 new_content = self.content_manager.truncate_to_limits(new_content, agent_limits)

+            # All memories go to project directory
             return self._save_memory_file(agent_id, new_content)

         except Exception as e:

@@ -1005,150 +987,7 @@ Standard markdown with structured sections. Agents expect:
             self.logger.error(f"Error ensuring memories directory: {e}")
             # Continue anyway - memory system should not block operations

-    def _ensure_user_memories_directory(self):
-        """Ensure user-level memories directory exists with README.
-
-        WHY: User-level memories provide global defaults that apply across all projects,
-        allowing users to maintain common patterns and guidelines.
-        """
-        try:
-            self.user_memories_dir.mkdir(parents=True, exist_ok=True)
-            self.logger.debug(f"Ensured user memories directory exists: {self.user_memories_dir}")
-
-            readme_path = self.user_memories_dir / "README.md"
-            if not readme_path.exists():
-                readme_content = """# User-Level Agent Memory System
-
-## Purpose
-User-level memories provide global defaults that apply to all projects. These memories are
-loaded first, then project-specific memories can override or extend them.
-
-## Directory Hierarchy
-1. **User-level memories** (~/.claude-mpm/memories/): Global defaults for all projects
-2. **Project-level memories** (./.claude-mpm/memories/): Project-specific overrides

-## How Memories Are Aggregated
-- User memories are loaded first as the base
-- Project memories override or extend user memories
-- Duplicate sections are merged with project taking precedence
-- Unique sections from both sources are preserved
-
-## Manual Editing
-Feel free to edit these files to add:
-- Common coding patterns you always use
-- Personal style guidelines
-- Frequently used architectural patterns
-- Global best practices
-
-## File Format
-Same as project memories - standard markdown with structured sections:
-- Project Architecture
-- Implementation Guidelines
-- Common Mistakes to Avoid
-- Current Technical Context
-
-## Examples of Good User-Level Memories
-- "Always use type hints in Python code"
-- "Prefer composition over inheritance"
-- "Write comprehensive docstrings for public APIs"
-- "Use dependency injection for testability"
-"""
-                readme_path.write_text(readme_content, encoding="utf-8")
-                self.logger.info("Created README.md in user memories directory")
-
-        except Exception as e:
-            self.logger.error(f"Error ensuring user memories directory: {e}")
-            # Continue anyway - memory system should not block operations
-
-    def _aggregate_agent_memories(self, user_memory: str, project_memory: str, agent_id: str) -> str:
-        """Aggregate user and project memories for an agent.
-
-        WHY: When both user-level and project-level memories exist, they need to be
-        intelligently merged to provide comprehensive context while avoiding duplication
-        and respecting project-specific overrides.
-
-        Strategy:
-        - Parse both memories into sections
-        - Merge sections with project taking precedence
-        - Remove exact duplicates within sections
-        - Preserve unique items from both sources
-
-        Args:
-            user_memory: User-level memory content
-            project_memory: Project-level memory content
-            agent_id: Agent identifier for context
-
-        Returns:
-            str: Aggregated memory content
-        """
-        # Parse memories into sections
-        user_sections = self._parse_memory_sections(user_memory)
-        project_sections = self._parse_memory_sections(project_memory)
-
-        # Start with user sections as base
-        merged_sections = {}
-
-        # Add all user sections first
-        for section_name, items in user_sections.items():
-            merged_sections[section_name] = set(items)
-
-        # Merge project sections (overrides/extends user)
-        for section_name, items in project_sections.items():
-            if section_name in merged_sections:
-                # Merge items - project items take precedence
-                merged_sections[section_name].update(items)
-            else:
-                # New section from project
-                merged_sections[section_name] = set(items)
-
-        # Build aggregated memory content
-        lines = []
-
-        # Add header
-        lines.append(f"# {agent_id.capitalize()} Agent Memory")
-        lines.append("")
-        lines.append("*Aggregated from user-level and project-level memories*")
-        lines.append("")
-        lines.append(f"<!-- Last Updated: {datetime.now().isoformat()} -->")
-        lines.append("")
-
-        # Add sections in a consistent order
-        section_order = [
-            "Project Architecture",
-            "Implementation Guidelines",
-            "Common Mistakes to Avoid",
-            "Current Technical Context",
-            "Coding Patterns Learned",
-            "Effective Strategies",
-            "Integration Points",
-            "Performance Considerations",
-            "Domain-Specific Knowledge",
-            "Recent Learnings"
-        ]
-
-        # First add ordered sections that exist
-        for section_name in section_order:
-            if section_name in merged_sections and merged_sections[section_name]:
-                lines.append(f"## {section_name}")
-                lines.append("")
-                # Sort items for consistent output
-                for item in sorted(merged_sections[section_name]):
-                    if item.strip():  # Skip empty items
-                        lines.append(item)
-                lines.append("")
-
-        # Then add any remaining sections not in the order list
-        remaining_sections = set(merged_sections.keys()) - set(section_order)
-        for section_name in sorted(remaining_sections):
-            if merged_sections[section_name]:
-                lines.append(f"## {section_name}")
-                lines.append("")
-                for item in sorted(merged_sections[section_name]):
-                    if item.strip():
-                        lines.append(item)
-                lines.append("")
-
-        return '\n'.join(lines)

     def _parse_memory_sections(self, memory_content: str) -> Dict[str, List[str]]:
         """Parse memory content into sections and items.
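Because `~/.claude-mpm/memories/` is no longer read or aggregated, anything stored there stops reaching agents after the upgrade. A hypothetical one-off migration sketch (not part of the package) that copies those files into the project-level directory the manager now uses:

```python
import shutil
from pathlib import Path

old_dir = Path.home() / ".claude-mpm" / "memories"  # former user-level location
new_dir = Path.cwd() / ".claude-mpm" / "memories"   # project-level location used from 4.0.22 on

if old_dir.is_dir():
    new_dir.mkdir(parents=True, exist_ok=True)
    for old_file in old_dir.glob("*_memories.md"):
        target = new_dir / old_file.name
        if not target.exists():  # never clobber existing project memories
            shutil.copy2(old_file, target)
            print(f"Copied {old_file.name} into {new_dir}")
```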
claude_mpm/services/memory_hook_service.py

@@ -143,11 +143,69 @@ class MemoryHookService(BaseService, MemoryHookInterface):
             HookResult with success status and any modifications
         """
         try:
-            # This would integrate with a memory service to save new memories
-            # For now, this is a placeholder for future memory integration
-            self.logger.debug("Saving new memories from interaction")
-
             from claude_mpm.hooks.base_hook import HookResult
+
+            # Extract agent_id and response from context
+            agent_id = None
+            response_text = None
+
+            # Try to get agent_id from various possible locations in context
+            if hasattr(context, 'data') and context.data:
+                data = context.data
+
+                # Check for agent_id in various locations
+                if isinstance(data, dict):
+                    # Try direct agent_id field
+                    agent_id = data.get('agent_id')
+
+                    # Try agent_type field
+                    if not agent_id:
+                        agent_id = data.get('agent_type')
+
+                    # Try subagent_type (for Task delegations)
+                    if not agent_id:
+                        agent_id = data.get('subagent_type')
+
+                    # Try tool_parameters for Task delegations
+                    if not agent_id and 'tool_parameters' in data:
+                        params = data.get('tool_parameters', {})
+                        if isinstance(params, dict):
+                            agent_id = params.get('subagent_type')
+
+                    # Extract response text
+                    response_text = data.get('response') or data.get('result') or data.get('output')
+
+                    # If response_text is a dict, try to get text from it
+                    if isinstance(response_text, dict):
+                        response_text = response_text.get('text') or response_text.get('content') or str(response_text)
+
+            # Default to PM if no agent_id found
+            if not agent_id:
+                agent_id = "PM"
+                self.logger.debug("No agent_id found in context, defaulting to PM")
+
+            # Only process if we have response text
+            if response_text and isinstance(response_text, str):
+                self.logger.debug(f"Processing memory extraction for agent: {agent_id}")
+
+                # Import and use the memory manager
+                from claude_mpm.services.agents.memory.agent_memory_manager import get_memory_manager
+
+                try:
+                    memory_manager = get_memory_manager()
+
+                    # Extract and update memory
+                    success = memory_manager.extract_and_update_memory(agent_id, response_text)
+
+                    if success:
+                        self.logger.info(f"Successfully extracted and saved memories for {agent_id}")
+                    else:
+                        self.logger.debug(f"No memories found to extract for {agent_id}")
+
+                except Exception as mem_error:
+                    self.logger.warning(f"Failed to extract/save memories for {agent_id}: {mem_error}")
+            else:
+                self.logger.debug("No response text found in context for memory extraction")

             return HookResult(success=True, data=context.data, modified=False)

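The hook now resolves the agent id from several places in `context.data` before falling back to `PM`, and only runs extraction when it also finds response text. The dictionaries below illustrate payload shapes the lookup above accepts; the keys mirror the hunk, while the values are made up for the example.

```python
# Expected agent id -> illustrative context.data payload that resolves to it.
examples = {
    "engineer": {"agent_id": "engineer", "response": "...agent output..."},
    "research": {"agent_type": "research", "result": {"text": "...agent output..."}},
    "api_qa": {"tool_parameters": {"subagent_type": "api_qa"}, "output": "...agent output..."},
    "PM": {"response": "...agent output..."},  # no agent fields at all: the hook defaults to PM
}
```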
claude_mpm/services/runner_configuration_service.py

@@ -72,11 +72,12 @@ class RunnerConfigurationService(BaseService, RunnerConfigurationInterface):
             Loaded configuration dictionary
         """
         try:
+            # Use singleton Config instance to prevent duplicate loading
             if config_path:
-                #
-                config = Config(config_path)
+                # Only pass config_path if it's different from what might already be loaded
+                config = Config({}, config_path)
             else:
-                #
+                # Use existing singleton instance
                 config = Config()

             return {

@@ -162,14 +163,9 @@ class RunnerConfigurationService(BaseService, RunnerConfigurationInterface):
             "websocket_port": kwargs.get("websocket_port", 8765),
         }

-        # Initialize main configuration
+        # Initialize main configuration (singleton will prevent duplicate loading)
         try:
             config = Config()
-        except FileNotFoundError as e:
-            self.logger.warning(
-                "Configuration file not found, using defaults", extra={"error": str(e)}
-            )
-            config = Config()  # Will use defaults
         except Exception as e:
             self.logger.error("Failed to load configuration", exc_info=True)
             raise RuntimeError(f"Configuration initialization failed: {e}") from e
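The comments above describe `Config` as a singleton, so a bare `Config()` is expected to reuse the already-loaded configuration rather than re-reading files. A hedged sketch of that intent; the identity assertion is an assumption drawn from those comments, not something this diff demonstrates.

```python
from claude_mpm.core.config import Config  # module path taken from the file list above

config_a = Config()
config_b = Config()
assert config_a is config_b  # assumption: singleton identity, implied by the diff's comments

# An explicit file can still be supplied, mirroring the call in the hunk above
# (the path here is hypothetical).
# config_c = Config({}, "/path/to/claude-mpm.yml")
```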