claude-mpm 4.0.31__py3-none-any.whl → 4.0.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/BASE_AGENT_TEMPLATE.md +33 -25
  3. claude_mpm/agents/INSTRUCTIONS.md +14 -10
  4. claude_mpm/agents/templates/documentation.json +51 -34
  5. claude_mpm/agents/templates/research.json +0 -11
  6. claude_mpm/cli/__init__.py +63 -26
  7. claude_mpm/cli/commands/agent_manager.py +10 -8
  8. claude_mpm/core/framework_loader.py +272 -113
  9. claude_mpm/dashboard/static/css/dashboard.css +449 -0
  10. claude_mpm/dashboard/static/dist/components/agent-inference.js +1 -1
  11. claude_mpm/dashboard/static/dist/components/event-viewer.js +1 -1
  12. claude_mpm/dashboard/static/dist/components/file-tool-tracker.js +1 -1
  13. claude_mpm/dashboard/static/dist/components/module-viewer.js +1 -1
  14. claude_mpm/dashboard/static/dist/components/session-manager.js +1 -1
  15. claude_mpm/dashboard/static/dist/dashboard.js +1 -1
  16. claude_mpm/dashboard/static/dist/socket-client.js +1 -1
  17. claude_mpm/dashboard/static/js/components/agent-hierarchy.js +774 -0
  18. claude_mpm/dashboard/static/js/components/agent-inference.js +257 -3
  19. claude_mpm/dashboard/static/js/components/build-tracker.js +289 -0
  20. claude_mpm/dashboard/static/js/components/event-viewer.js +168 -39
  21. claude_mpm/dashboard/static/js/components/file-tool-tracker.js +17 -0
  22. claude_mpm/dashboard/static/js/components/session-manager.js +23 -3
  23. claude_mpm/dashboard/static/js/components/socket-manager.js +2 -0
  24. claude_mpm/dashboard/static/js/dashboard.js +207 -31
  25. claude_mpm/dashboard/static/js/socket-client.js +85 -6
  26. claude_mpm/dashboard/templates/index.html +1 -0
  27. claude_mpm/hooks/claude_hooks/connection_pool.py +12 -2
  28. claude_mpm/hooks/claude_hooks/event_handlers.py +81 -19
  29. claude_mpm/hooks/claude_hooks/hook_handler.py +72 -10
  30. claude_mpm/hooks/claude_hooks/hook_handler_eventbus.py +398 -0
  31. claude_mpm/hooks/claude_hooks/response_tracking.py +10 -0
  32. claude_mpm/services/agents/deployment/agent_deployment.py +86 -37
  33. claude_mpm/services/agents/deployment/agent_template_builder.py +18 -10
  34. claude_mpm/services/agents/deployment/agents_directory_resolver.py +10 -25
  35. claude_mpm/services/agents/deployment/multi_source_deployment_service.py +189 -3
  36. claude_mpm/services/agents/deployment/pipeline/steps/target_directory_step.py +3 -2
  37. claude_mpm/services/agents/deployment/strategies/system_strategy.py +10 -3
  38. claude_mpm/services/agents/deployment/strategies/user_strategy.py +10 -14
  39. claude_mpm/services/agents/deployment/system_instructions_deployer.py +8 -13
  40. claude_mpm/services/agents/memory/agent_memory_manager.py +141 -184
  41. claude_mpm/services/agents/memory/content_manager.py +182 -232
  42. claude_mpm/services/agents/memory/template_generator.py +4 -40
  43. claude_mpm/services/event_bus/__init__.py +18 -0
  44. claude_mpm/services/event_bus/event_bus.py +334 -0
  45. claude_mpm/services/event_bus/relay.py +301 -0
  46. claude_mpm/services/events/__init__.py +44 -0
  47. claude_mpm/services/events/consumers/__init__.py +18 -0
  48. claude_mpm/services/events/consumers/dead_letter.py +296 -0
  49. claude_mpm/services/events/consumers/logging.py +183 -0
  50. claude_mpm/services/events/consumers/metrics.py +242 -0
  51. claude_mpm/services/events/consumers/socketio.py +376 -0
  52. claude_mpm/services/events/core.py +470 -0
  53. claude_mpm/services/events/interfaces.py +230 -0
  54. claude_mpm/services/events/producers/__init__.py +14 -0
  55. claude_mpm/services/events/producers/hook.py +269 -0
  56. claude_mpm/services/events/producers/system.py +327 -0
  57. claude_mpm/services/mcp_gateway/core/process_pool.py +411 -0
  58. claude_mpm/services/mcp_gateway/server/stdio_server.py +13 -0
  59. claude_mpm/services/monitor_build_service.py +345 -0
  60. claude_mpm/services/socketio/event_normalizer.py +667 -0
  61. claude_mpm/services/socketio/handlers/connection.py +78 -20
  62. claude_mpm/services/socketio/handlers/hook.py +14 -5
  63. claude_mpm/services/socketio/migration_utils.py +329 -0
  64. claude_mpm/services/socketio/server/broadcaster.py +26 -33
  65. claude_mpm/services/socketio/server/core.py +4 -3
  66. {claude_mpm-4.0.31.dist-info → claude_mpm-4.0.34.dist-info}/METADATA +4 -3
  67. {claude_mpm-4.0.31.dist-info → claude_mpm-4.0.34.dist-info}/RECORD +71 -50
  68. {claude_mpm-4.0.31.dist-info → claude_mpm-4.0.34.dist-info}/WHEEL +0 -0
  69. {claude_mpm-4.0.31.dist-info → claude_mpm-4.0.34.dist-info}/entry_points.txt +0 -0
  70. {claude_mpm-4.0.31.dist-info → claude_mpm-4.0.34.dist-info}/licenses/LICENSE +0 -0
  71. {claude_mpm-4.0.31.dist-info → claude_mpm-4.0.34.dist-info}/top_level.txt +0 -0
claude_mpm/services/agents/memory/content_manager.py
@@ -24,16 +24,9 @@ class MemoryContentManager:
 
     WHY: Memory content requires careful manipulation to maintain structure,
     enforce limits, and ensure consistency. This class centralizes all content
-    manipulation logic.
+    manipulation logic for simple list-based memories.
     """
 
-    REQUIRED_SECTIONS = [
-        "Project Architecture",
-        "Implementation Guidelines",
-        "Common Mistakes to Avoid",
-        "Current Technical Context",
-    ]
-
     def __init__(self, memory_limits: Dict[str, Any]):
         """Initialize the content manager.
 
@@ -43,41 +36,21 @@ class MemoryContentManager:
         self.memory_limits = memory_limits
         self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
 
-    def add_item_to_section(self, content: str, section: str, new_item: str) -> str:
-        """Add item to specified section with NLP-based deduplication.
+    def add_item_to_list(self, content: str, new_item: str) -> str:
+        """Add item to memory list with deduplication.
 
-        WHY: Each section has a maximum item limit to prevent information overload
-        and maintain readability. Additionally, we use NLP-based similarity detection
-        to prevent duplicate or highly similar items from cluttering the memory.
-        When similar items are found (>80% similarity), the newer item replaces the
-        older one to maintain recency while avoiding redundancy.
+        WHY: Simplified memory system uses a simple list format. We still use
+        NLP-based similarity detection to prevent duplicate or highly similar
+        items from cluttering the memory.
 
         Args:
             content: Current memory file content
-            section: Section name to add item to
             new_item: Item to add
 
         Returns:
             str: Updated content with new item added and duplicates removed
         """
         lines = content.split("\n")
-        section_start = None
-        section_end = None
-
-        # Find section boundaries
-        for i, line in enumerate(lines):
-            if line.startswith(f"## {section}"):
-                section_start = i
-            elif section_start is not None and line.startswith("## "):
-                section_end = i
-                break
-
-        if section_start is None:
-            # Section doesn't exist, add it
-            return self._add_new_section(content, section, new_item)
-
-        if section_end is None:
-            section_end = len(lines)
 
         # Ensure line length limit (account for "- " prefix)
         max_item_length = (
@@ -86,11 +59,13 @@ class MemoryContentManager:
         if len(new_item) > max_item_length:
             new_item = new_item[: max_item_length - 3] + "..."
 
-        # Check for duplicates or similar items using NLP similarity
+        # Find existing items and check for duplicates
         items_to_remove = []
-        for i in range(section_start + 1, section_end):
-            if lines[i].strip().startswith("- "):
-                existing_item = lines[i].strip()[2:]  # Remove "- " prefix
+        item_indices = []
+        for i, line in enumerate(lines):
+            if line.strip().startswith("- "):
+                item_indices.append(i)
+                existing_item = line.strip()[2:]  # Remove "- " prefix
                 similarity = self._calculate_similarity(existing_item, new_item)
 
                 # If highly similar (>80%), mark for removal
@@ -104,77 +79,45 @@ class MemoryContentManager:
         # Remove similar items (in reverse order to maintain indices)
         for idx in reversed(items_to_remove):
             lines.pop(idx)
-            section_end -= 1
-
-        # Count remaining items after deduplication
-        item_count = 0
-        first_item_index = None
-        for i in range(section_start + 1, section_end):
-            if lines[i].strip().startswith("- "):
-                if first_item_index is None:
-                    first_item_index = i
-                item_count += 1
-
-        # Check if we need to remove oldest item due to section limits
-        if item_count >= self.memory_limits["max_items_per_section"]:
-            # Remove oldest item (first one) to make room
-            if first_item_index is not None:
-                lines.pop(first_item_index)
-                section_end -= 1  # Adjust section end after removal
-
-        # Add new item (find insertion point after any comments)
-        insert_point = section_start + 1
-        while insert_point < section_end and (
-            not lines[insert_point].strip()
-            or lines[insert_point].strip().startswith("<!--")
-        ):
-            insert_point += 1
 
+        # Count remaining items
+        item_count = sum(1 for line in lines if line.strip().startswith("- "))
+
+        # Check if we need to remove oldest item due to limits
+        max_items = self.memory_limits.get("max_items", 100)
+        if item_count >= max_items:
+            # Find and remove the first item (oldest)
+            for i, line in enumerate(lines):
+                if line.strip().startswith("- "):
+                    lines.pop(i)
+                    break
+
+        # Add new item at the end of the list
+        # Find the insertion point (after header and metadata, before any trailing empty lines)
+        insert_point = len(lines)
+        for i in range(len(lines) - 1, -1, -1):
+            if lines[i].strip():
+                insert_point = i + 1
+                break
+
         lines.insert(insert_point, f"- {new_item}")
 
         # Update timestamp
         updated_content = "\n".join(lines)
         return self.update_timestamp(updated_content)
 
-    def _add_new_section(self, content: str, section: str, new_item: str) -> str:
-        """Add a new section with the given item.
-
-        WHY: When agents discover learnings that don't fit existing sections,
-        we need to create new sections dynamically while respecting the maximum
-        section limit.
-
+    def add_item_to_section(self, content: str, section: str, new_item: str) -> str:
+        """Legacy method for backward compatibility - delegates to add_item_to_list.
+
         Args:
-            content: Current memory content
-            section: New section name
-            new_item: First item for the section
-
+            content: Current memory file content
+            section: Section name (ignored in simple list format)
+            new_item: Item to add
+
         Returns:
-            str: Updated content with new section
+            str: Updated content with new item added
         """
-        lines = content.split("\n")
-
-        # Count existing sections
-        section_count = sum(1 for line in lines if line.startswith("## "))
-
-        if section_count >= self.memory_limits["max_sections"]:
-            self.logger.warning(f"Maximum sections reached, cannot add '{section}'")
-            # Try to add to Recent Learnings instead
-            return self.add_item_to_section(content, "Recent Learnings", new_item)
-
-        # Find insertion point (before Recent Learnings or at end)
-        insert_point = len(lines)
-        for i, line in enumerate(lines):
-            if line.startswith("## Recent Learnings"):
-                insert_point = i
-                break
-
-        # Insert new section
-        new_section = ["", f"## {section}", f"- {new_item}", ""]
-
-        for j, line in enumerate(new_section):
-            lines.insert(insert_point + j, line)
-
-        return "\n".join(lines)
+        return self.add_item_to_list(content, new_item)
 
     def exceeds_limits(
         self, content: str, agent_limits: Optional[Dict[str, Any]] = None
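Aside on the 80% threshold above: _calculate_similarity is unchanged by this release and its body is not part of this diff. A minimal sketch of the replace-on-similarity policy that add_item_to_list applies, using difflib's SequenceMatcher as an assumed stand-in for the real helper:

from difflib import SequenceMatcher


def similarity(a: str, b: str) -> float:
    # Assumed stand-in for MemoryContentManager._calculate_similarity (not shown in this diff)
    return SequenceMatcher(None, a.lower(), b.lower()).ratio()


def add_item(items: list, new_item: str, max_items: int = 100) -> list:
    # Drop entries more than 80% similar to the new one, so the newer wording wins
    items = [item for item in items if similarity(item, new_item) <= 0.8]
    # Evict the oldest entry once the cap is reached, then append the new item at the end
    if len(items) >= max_items:
        items.pop(0)
    return items + [new_item]


memories = ["Always run make lint before committing", "Config lives in pyproject.toml"]
print(add_item(memories, "Run make lint before committing"))
# ['Config lives in pyproject.toml', 'Run make lint before committing']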
@@ -193,15 +136,13 @@ class MemoryContentManager:
         size_kb = len(content.encode("utf-8")) / 1024
         return size_kb > limits["max_file_size_kb"]
 
-    def truncate_to_limits(
+    def truncate_simple_list(
         self, content: str, agent_limits: Optional[Dict[str, Any]] = None
     ) -> str:
-        """Truncate content to fit within limits.
+        """Truncate simple list content to fit within limits.
 
-        WHY: When memory files exceed size limits, we need a strategy to reduce
-        size while preserving the most important information. This implementation
-        removes items from "Recent Learnings" first as they're typically less
-        consolidated than other sections.
+        WHY: When memory files exceed size limits, we remove oldest items
+        (from the beginning of the list) to maintain the most recent learnings.
 
         Args:
             content: Content to truncate
@@ -213,37 +154,45 @@ class MemoryContentManager:
         lines = content.split("\n")
         limits = agent_limits or self.memory_limits
 
-        # Strategy: Remove items from Recent Learnings first
+        # Strategy: Remove oldest items (from beginning) to keep recent ones
         while self.exceeds_limits("\n".join(lines), agent_limits):
             removed = False
 
-            # First try Recent Learnings
+            # Find and remove the first item (oldest)
             for i, line in enumerate(lines):
-                if line.startswith("## Recent Learnings"):
-                    # Find and remove first item in this section
-                    for j in range(i + 1, len(lines)):
-                        if lines[j].strip().startswith("- "):
-                            lines.pop(j)
-                            removed = True
-                            break
-                        elif lines[j].startswith("## "):
-                            break
+                if line.strip().startswith("- "):
+                    lines.pop(i)
+                    removed = True
                     break
 
-            # If no Recent Learnings items, remove from other sections
-            if not removed:
-                # Remove from sections in reverse order (bottom up)
-                for i in range(len(lines) - 1, -1, -1):
-                    if lines[i].strip().startswith("- "):
-                        lines.pop(i)
-                        removed = True
-                        break
-
             # Safety: If nothing removed, truncate from end
             if not removed:
                 lines = lines[:-10]
 
+        # Also check max_items limit
+        max_items = limits.get("max_items", 100)
+        item_count = sum(1 for line in lines if line.strip().startswith("- "))
+
+        if item_count > max_items:
+            # Remove oldest items to fit within max_items
+            items_removed = 0
+            target_removals = item_count - max_items
+
+            i = 0
+            while i < len(lines) and items_removed < target_removals:
+                if lines[i].strip().startswith("- "):
+                    lines.pop(i)
+                    items_removed += 1
+                else:
+                    i += 1
+
         return "\n".join(lines)
+
+    def truncate_to_limits(
+        self, content: str, agent_limits: Optional[Dict[str, Any]] = None
+    ) -> str:
+        """Legacy method for backward compatibility - delegates to truncate_simple_list."""
+        return self.truncate_simple_list(content, agent_limits)
 
     def update_timestamp(self, content: str) -> str:
         """Update the timestamp in the file header.
@@ -254,18 +203,26 @@ class MemoryContentManager:
         Returns:
             str: Content with updated timestamp
         """
-        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        return re.sub(
+        timestamp = datetime.now().isoformat() + "Z"
+        # Handle both old and new timestamp formats
+        content = re.sub(
+            r"<!-- Last Updated: .+? -->",
+            f"<!-- Last Updated: {timestamp} -->",
+            content,
+        )
+        # Also handle legacy format
+        content = re.sub(
             r"<!-- Last Updated: .+ \| Auto-updated by: .+ -->",
-            f"<!-- Last Updated: {timestamp} | Auto-updated by: system -->",
+            f"<!-- Last Updated: {timestamp} -->",
             content,
         )
+        return content
 
     def validate_and_repair(self, content: str, agent_id: str) -> str:
         """Validate memory file and repair if needed.
 
         WHY: Memory files might be manually edited by developers or corrupted.
-        This method ensures the file maintains required structure and sections.
+        This method ensures the file maintains proper simple list structure.
 
         Args:
             content: Content to validate
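For reference, the non-greedy pattern in the first substitution of update_timestamp matches both the new ISO-style comment and the legacy "Auto-updated by" comment, so the second, legacy-specific substitution acts as a fallback. A quick illustration with made-up header values:

import re
from datetime import datetime

timestamp = datetime.now().isoformat() + "Z"
headers = [
    "<!-- Last Updated: 2025-01-01T00:00:00Z -->",  # new format
    "<!-- Last Updated: 2025-01-01 00:00:00 | Auto-updated by: engineer -->",  # legacy format
]
for header in headers:
    print(re.sub(r"<!-- Last Updated: .+? -->", f"<!-- Last Updated: {timestamp} -->", header))
# Both headers come out in the new "<!-- Last Updated: <ISO timestamp>Z -->" form.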
@@ -275,88 +232,94 @@ class MemoryContentManager:
         str: Validated and repaired content
         """
         lines = content.split("\n")
-        existing_sections = set()
-
-        # Find existing sections
-        for line in lines:
-            if line.startswith("## "):
-                section_name = line[3:].split("(")[0].strip()
-                existing_sections.add(section_name)
-
-        # Check for required sections
-        missing_sections = []
-        for required in self.REQUIRED_SECTIONS:
-            if required not in existing_sections:
-                missing_sections.append(required)
-
-        if missing_sections:
-            self.logger.info(
-                f"Adding missing sections to {agent_id} memory: {missing_sections}"
-            )
-
-            # Add missing sections before Recent Learnings
-            insert_point = len(lines)
-            for i, line in enumerate(lines):
-                if line.startswith("## Recent Learnings"):
-                    insert_point = i
-                    break
-
-            for section in missing_sections:
-                section_content = [
-                    "",
-                    f"## {section}",
-                    "<!-- Section added by repair -->",
-                    "",
-                ]
-                for j, line in enumerate(section_content):
-                    lines.insert(insert_point + j, line)
-                insert_point += len(section_content)
-
+
+        # Ensure proper header format
+        has_header = False
+        has_timestamp = False
+
+        for i, line in enumerate(lines[:5]):  # Check first 5 lines
+            if line.startswith("# Agent Memory:"):
+                has_header = True
+            elif line.startswith("<!-- Last Updated:"):
+                has_timestamp = True
+
+        # Add missing header or timestamp
+        if not has_header or not has_timestamp:
+            from datetime import datetime
+            new_lines = []
+
+            if not has_header:
+                new_lines.append(f"# Agent Memory: {agent_id}")
+            else:
+                # Keep existing header
+                for line in lines:
+                    if line.startswith("# "):
+                        new_lines.append(line)
+                        lines.remove(line)
+                        break
+
+            if not has_timestamp:
+                new_lines.append(f"<!-- Last Updated: {datetime.now().isoformat()}Z -->")
+                new_lines.append("")
+            else:
+                # Keep existing timestamp
+                for line in lines:
+                    if line.startswith("<!-- Last Updated:"):
+                        new_lines.append(line)
+                        lines.remove(line)
+                        break
+
+            # Add remaining content
+            for line in lines:
+                if not line.startswith("# ") and not line.startswith("<!-- Last Updated:"):
+                    new_lines.append(line)
+
+            return "\n".join(new_lines)
+
         return "\n".join(lines)
 
-    def parse_memory_content_to_dict(self, content: str) -> Dict[str, List[str]]:
-        """Parse memory content into structured dictionary format.
+    def parse_memory_content_to_list(self, content: str) -> List[str]:
+        """Parse memory content into a simple list format.
 
-        WHY: Provides consistent parsing of memory content into sections and items
-        for both display and programmatic access. This ensures the same parsing
-        logic is used across the system.
+        WHY: Provides consistent parsing of memory content as a simple list
+        for both display and programmatic access.
 
         Args:
             content: Raw memory file content
 
         Returns:
-            Dict mapping section names to lists of items
+            List of memory items
         """
-        sections = {}
-        current_section = None
-        current_items = []
+        items = []
 
         for line in content.split("\n"):
             line = line.strip()
 
-            # Skip empty lines and header information
-            if not line or line.startswith("#") and "Memory Usage" in line:
+            # Skip empty lines, headers, and metadata
+            if not line or line.startswith("#") or line.startswith("<!--"):
                 continue
 
-            if line.startswith("## ") and not line.startswith("## Memory Usage"):
-                # New section found
-                if current_section and current_items:
-                    sections[current_section] = current_items.copy()
-
-                current_section = line[3:].strip()
-                current_items = []
-
-            elif line.startswith("- ") and current_section:
-                # Item in current section
+            if line.startswith("- "):
+                # Item in list
                 item = line[2:].strip()
                 if item and len(item) > 3:  # Filter out very short items
-                    current_items.append(item)
-
-        # Add final section
-        if current_section and current_items:
-            sections[current_section] = current_items
+                    items.append(item)
 
-        return sections
+        return items
+
+    def parse_memory_content_to_dict(self, content: str) -> Dict[str, List[str]]:
+        """Legacy method for backward compatibility.
+
+        Returns a dict with single key 'memories' containing all items.
+
+        Args:
+            content: Raw memory file content
+
+        Returns:
+            Dict with 'memories' key mapping to list of items
+        """
+        items = self.parse_memory_content_to_list(content)
+        return {"memories": items}
 
     def _calculate_similarity(self, str1: str, str2: str) -> float:
         """Calculate similarity between two strings using fuzzy matching.
@@ -400,44 +363,27 @@ class MemoryContentManager:
 
         return similarity
 
-    def deduplicate_section(self, content: str, section: str) -> Tuple[str, int]:
-        """Deduplicate items within a section using NLP similarity.
+    def deduplicate_list(self, content: str) -> Tuple[str, int]:
+        """Deduplicate items in the memory list using NLP similarity.
 
-        WHY: Over time, sections can accumulate similar or duplicate items from
-        different sessions. This method cleans up existing sections by removing
-        similar items while preserving the most recent/relevant ones.
+        WHY: Over time, memory lists can accumulate similar or duplicate items from
+        different sessions. This method cleans up by removing similar items while
+        preserving the most recent ones.
 
         Args:
             content: Current memory file content
-            section: Section name to deduplicate
 
         Returns:
             Tuple of (updated content, number of items removed)
         """
         lines = content.split("\n")
-        section_start = None
-        section_end = None
-
-        # Find section boundaries
-        for i, line in enumerate(lines):
-            if line.startswith(f"## {section}"):
-                section_start = i
-            elif section_start is not None and line.startswith("## "):
-                section_end = i
-                break
-
-        if section_start is None:
-            return content, 0  # Section not found
-
-        if section_end is None:
-            section_end = len(lines)
 
-        # Collect all items in the section
+        # Collect all items in the list
         items = []
         item_indices = []
-        for i in range(section_start + 1, section_end):
-            if lines[i].strip().startswith("- "):
-                items.append(lines[i].strip()[2:])  # Remove "- " prefix
+        for i, line in enumerate(lines):
+            if line.strip().startswith("- "):
+                items.append(line.strip()[2:])  # Remove "- " prefix
                 item_indices.append(i)
 
         # Find duplicates using pairwise comparison
@@ -464,6 +410,18 @@ class MemoryContentManager:
             lines.pop(item_indices[idx])
 
         return "\n".join(lines), removed_count
+
+    def deduplicate_section(self, content: str, section: str) -> Tuple[str, int]:
+        """Legacy method for backward compatibility - delegates to deduplicate_list.
+
+        Args:
+            content: Current memory file content
+            section: Section name (ignored in simple list format)
+
+        Returns:
+            Tuple of (updated content, number of items removed)
+        """
+        return self.deduplicate_list(content)
 
     def validate_memory_size(self, content: str) -> tuple[bool, Optional[str]]:
         """Validate memory content size and structure.
@@ -477,7 +435,7 @@ class MemoryContentManager:
         try:
             # Check file size
             size_kb = len(content.encode("utf-8")) / 1024
-            max_size_kb = self.memory_limits.get("max_file_size_kb", 8)
+            max_size_kb = self.memory_limits.get("max_file_size_kb", 80)
 
             if size_kb > max_size_kb:
                 return (
@@ -485,20 +443,12 @@ class MemoryContentManager:
                     f"Memory size {size_kb:.1f}KB exceeds limit of {max_size_kb}KB",
                 )
 
-            # Check section count
-            sections = re.findall(r"^##\s+(.+)$", content, re.MULTILINE)
-            max_sections = self.memory_limits.get("max_sections", 10)
-
-            if len(sections) > max_sections:
-                return False, f"Too many sections: {len(sections)} (max {max_sections})"
-
-            # Check for required sections
-            required = set(self.REQUIRED_SECTIONS)
-            found = set(sections)
-            missing = required - found
+            # Check item count
+            items = sum(1 for line in content.split("\n") if line.strip().startswith("- "))
+            max_items = self.memory_limits.get("max_items", 100)
 
-            if missing:
-                return False, f"Missing required sections: {', '.join(missing)}"
+            if items > max_items:
+                return False, f"Too many items: {items} (max {max_items})"
 
             return True, None
 
claude_mpm/services/agents/memory/template_generator.py
@@ -68,7 +68,7 @@ class MemoryTemplateGenerator:
     def _create_basic_memory_template(
         self, agent_id: str, limits: Dict[str, Any]
     ) -> str:
-        """Create basic memory template when project analysis fails.
+        """Create basic memory template as a simple list.
 
         Args:
             agent_id: The agent identifier
@@ -77,45 +77,9 @@ class MemoryTemplateGenerator:
         Returns:
             str: Basic memory template
         """
-        agent_name = agent_id.replace("_agent", "").replace("_", " ").title()
-        project_name = self.working_directory.name
-        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-        return f"""# {agent_name} Agent Memory - {project_name}
-
-<!-- MEMORY LIMITS: {limits['max_file_size_kb']}KB max | {limits['max_sections']} sections max | {limits['max_items_per_section']} items per section -->
-<!-- Last Updated: {timestamp} | Auto-updated by: {agent_id} -->
-
-## Project Context
-{project_name}: Software project requiring analysis
-
-## Project Architecture
-- Analyze project structure to understand architecture patterns
-
-## Coding Patterns Learned
-- Observe codebase patterns and conventions during tasks
-
-## Implementation Guidelines
-- Extract implementation guidelines from project documentation
-
-## Domain-Specific Knowledge
-<!-- Agent-specific knowledge accumulates here -->
-
-## Effective Strategies
-<!-- Successful approaches discovered through experience -->
-
-## Common Mistakes to Avoid
-- Learn from errors encountered during project work
-
-## Integration Points
-<!-- Key interfaces and integration patterns -->
-
-## Performance Considerations
-<!-- Performance insights and optimization patterns -->
+        timestamp = datetime.now().isoformat() + "Z"
 
-## Current Technical Context
-- Project analysis pending - gather context during tasks
+        return f"""# Agent Memory: {agent_id}
+<!-- Last Updated: {timestamp} -->
 
-## Recent Learnings
-<!-- Most recent discoveries and insights -->
 """
claude_mpm/services/event_bus/__init__.py
@@ -0,0 +1,18 @@
+"""Event Bus Service for decoupled event handling.
+
+This module provides a centralized event bus that decouples event producers
+(like hooks) from consumers (like Socket.IO). It uses pyee's AsyncIOEventEmitter
+to support both synchronous publishing and asynchronous consumption.
+
+WHY event bus architecture:
+- Decouples hooks from Socket.IO implementation details
+- Allows multiple consumers for the same events
+- Enables easy testing without Socket.IO dependencies
+- Provides event filtering and routing capabilities
+- Supports both sync (hooks) and async (Socket.IO) contexts
+"""
+
+from .event_bus import EventBus
+from .relay import SocketIORelay
+
+__all__ = ["EventBus", "SocketIORelay"]
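EventBus and SocketIORelay are defined in the sibling event_bus.py and relay.py modules added in this release. As a rough sketch of the decoupling pattern the docstring describes, built directly on the pyee emitter it names (assuming a recent pyee that exposes pyee.asyncio; the event name and payload are made up, and the package's own EventBus API may differ):

import asyncio

from pyee.asyncio import AsyncIOEventEmitter


async def main() -> None:
    bus = AsyncIOEventEmitter()

    # Consumer side: an async handler, e.g. a Socket.IO broadcaster or a metrics collector
    @bus.on("hook.pre_tool")
    async def forward(event: dict) -> None:
        print("consumed:", event)

    # Producer side: synchronous code (such as a hook handler) emits and moves on;
    # the coroutine handler is scheduled on the running event loop.
    bus.emit("hook.pre_tool", {"tool": "Bash", "session": "abc123"})

    await asyncio.sleep(0)  # yield so the scheduled handler runs


asyncio.run(main())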