claude-mpm 5.6.12__py3-none-any.whl → 5.6.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/cli/commands/commander.py +173 -3
  3. claude_mpm/cli/parsers/commander_parser.py +41 -8
  4. claude_mpm/cli/startup.py +10 -1
  5. claude_mpm/cli/startup_display.py +2 -1
  6. claude_mpm/commander/__init__.py +6 -0
  7. claude_mpm/commander/adapters/__init__.py +32 -3
  8. claude_mpm/commander/adapters/auggie.py +260 -0
  9. claude_mpm/commander/adapters/base.py +98 -1
  10. claude_mpm/commander/adapters/claude_code.py +32 -1
  11. claude_mpm/commander/adapters/codex.py +237 -0
  12. claude_mpm/commander/adapters/example_usage.py +310 -0
  13. claude_mpm/commander/adapters/mpm.py +389 -0
  14. claude_mpm/commander/adapters/registry.py +204 -0
  15. claude_mpm/commander/api/app.py +32 -16
  16. claude_mpm/commander/api/routes/messages.py +11 -11
  17. claude_mpm/commander/api/routes/projects.py +20 -20
  18. claude_mpm/commander/api/routes/sessions.py +19 -21
  19. claude_mpm/commander/api/routes/work.py +86 -50
  20. claude_mpm/commander/api/schemas.py +4 -0
  21. claude_mpm/commander/chat/cli.py +4 -0
  22. claude_mpm/commander/daemon.py +139 -9
  23. claude_mpm/commander/env_loader.py +59 -0
  24. claude_mpm/commander/memory/__init__.py +45 -0
  25. claude_mpm/commander/memory/compression.py +347 -0
  26. claude_mpm/commander/memory/embeddings.py +230 -0
  27. claude_mpm/commander/memory/entities.py +310 -0
  28. claude_mpm/commander/memory/example_usage.py +290 -0
  29. claude_mpm/commander/memory/integration.py +325 -0
  30. claude_mpm/commander/memory/search.py +381 -0
  31. claude_mpm/commander/memory/store.py +657 -0
  32. claude_mpm/commander/registry.py +10 -4
  33. claude_mpm/commander/work/executor.py +22 -12
  34. claude_mpm/core/output_style_manager.py +34 -7
  35. claude_mpm/hooks/claude_hooks/auto_pause_handler.py +0 -0
  36. claude_mpm/hooks/claude_hooks/event_handlers.py +0 -0
  37. claude_mpm/hooks/claude_hooks/hook_handler.py +0 -0
  38. claude_mpm/hooks/claude_hooks/memory_integration.py +0 -0
  39. claude_mpm/hooks/claude_hooks/response_tracking.py +0 -0
  40. claude_mpm/hooks/templates/pre_tool_use_template.py +0 -0
  41. claude_mpm/scripts/start_activity_logging.py +0 -0
  42. claude_mpm/skills/__init__.py +2 -1
  43. claude_mpm/skills/registry.py +295 -90
  44. {claude_mpm-5.6.12.dist-info → claude_mpm-5.6.14.dist-info}/METADATA +2 -2
  45. {claude_mpm-5.6.12.dist-info → claude_mpm-5.6.14.dist-info}/RECORD +43 -29
  46. {claude_mpm-5.6.12.dist-info → claude_mpm-5.6.14.dist-info}/WHEEL +0 -0
  47. {claude_mpm-5.6.12.dist-info → claude_mpm-5.6.14.dist-info}/entry_points.txt +0 -0
  48. {claude_mpm-5.6.12.dist-info → claude_mpm-5.6.14.dist-info}/licenses/LICENSE +0 -0
  49. {claude_mpm-5.6.12.dist-info → claude_mpm-5.6.14.dist-info}/licenses/LICENSE-FAQ.md +0 -0
  50. {claude_mpm-5.6.12.dist-info → claude_mpm-5.6.14.dist-info}/top_level.txt +0 -0
@@ -16,8 +16,10 @@ from .api.app import (
16
16
  )
17
17
  from .config import DaemonConfig
18
18
  from .core.block_manager import BlockManager
19
+ from .env_loader import load_env
19
20
  from .events.manager import EventManager
20
21
  from .inbox import Inbox
22
+ from .models.events import EventStatus
21
23
  from .parsing.output_parser import OutputParser
22
24
  from .persistence import EventStore, StateStore
23
25
  from .project_session import ProjectSession, SessionState
@@ -28,6 +30,9 @@ from .work.executor import WorkExecutor
28
30
  from .work.queue import WorkQueue
29
31
  from .workflow.event_handler import EventHandler
30
32
 
33
+ # Load environment variables at module import
34
+ load_env()
35
+
31
36
  logger = logging.getLogger(__name__)
32
37
 
33
38
 
@@ -159,12 +164,16 @@ class CommanderDaemon:
159
164
  # Set up signal handlers
160
165
  self._setup_signal_handlers()
161
166
 
162
- # Inject global instances into API app
163
- global api_registry, api_tmux, api_event_manager, api_inbox
164
- api_registry = self.registry
165
- api_tmux = self.orchestrator
166
- api_event_manager = self.event_manager
167
- api_inbox = self.inbox
167
+ # Inject daemon instances into API app.state (BEFORE lifespan runs)
168
+ app.state.registry = self.registry
169
+ app.state.tmux = self.orchestrator
170
+ app.state.event_manager = self.event_manager
171
+ app.state.inbox = self.inbox
172
+ app.state.work_queues = self.work_queues
173
+ app.state.daemon_instance = self
174
+ app.state.session_manager = self.sessions
175
+ app.state.event_handler = self.event_handler
176
+ logger.info(f"Injected work_queues dict id: {id(self.work_queues)}")
168
177
 
169
178
  # Start API server in background
170
179
  logger.info(f"Starting API server on {self.config.host}:{self.config.port}")
@@ -257,9 +266,19 @@ class CommanderDaemon:
257
266
 
258
267
  while self._running:
259
268
  try:
260
- # TODO: Check for resolved events and resume sessions (Phase 2 Sprint 3)
261
- # TODO: Check each ProjectSession for runnable work (Phase 2 Sprint 2)
262
- # TODO: Spawn RuntimeExecutors for new work items (Phase 2 Sprint 1)
269
+ logger.info(f"🔄 Main loop iteration (running={self._running})")
270
+ logger.info(
271
+ f"work_queues dict id: {id(self.work_queues)}, keys: {list(self.work_queues.keys())}"
272
+ )
273
+
274
+ # Check for resolved events and resume sessions
275
+ await self._check_and_resume_sessions()
276
+
277
+ # Check each ProjectSession for runnable work
278
+ logger.info(
279
+ f"Checking for pending work across {len(self.work_queues)} queues"
280
+ )
281
+ await self._execute_pending_work()
263
282
 
264
283
  # Periodic state persistence
265
284
  current_time = asyncio.get_event_loop().time()
@@ -429,6 +448,117 @@ class CommanderDaemon:
429
448
  except Exception as e:
430
449
  logger.error(f"Failed to save state: {e}", exc_info=True)
431
450
 
451
+ async def _check_and_resume_sessions(self) -> None:
452
+ """Check for resolved events and resume paused sessions.
453
+
454
+ Iterates through all paused sessions, checks if their blocking events
455
+ have been resolved, and resumes execution if ready.
456
+ """
457
+ for project_id, session in list(self.sessions.items()):
458
+ # Skip non-paused sessions
459
+ if session.state != SessionState.PAUSED:
460
+ continue
461
+
462
+ # Check if pause reason (event ID) is resolved
463
+ if not session.pause_reason:
464
+ logger.warning(f"Session {project_id} paused with no reason, resuming")
465
+ await session.resume()
466
+ continue
467
+
468
+ # Check if event is resolved
469
+ event = self.event_manager.get(session.pause_reason)
470
+ if event and event.status == EventStatus.RESOLVED:
471
+ logger.info(
472
+ f"Event {event.id} resolved, resuming session for {project_id}"
473
+ )
474
+ await session.resume()
475
+
476
+ # Unblock any work items that were blocked by this event
477
+ if project_id in self.work_executors:
478
+ executor = self.work_executors[project_id]
479
+ queue = self.work_queues[project_id]
480
+
481
+ # Find work items blocked by this event
482
+ blocked_items = [
483
+ item
484
+ for item in queue.list()
485
+ if item.state.value == "blocked"
486
+ and item.metadata.get("block_reason") == event.id
487
+ ]
488
+
489
+ for item in blocked_items:
490
+ await executor.handle_unblock(item.id)
491
+ logger.info(f"Unblocked work item {item.id}")
492
+
493
+ async def _execute_pending_work(self) -> None:
494
+ """Execute pending work for all ready sessions.
495
+
496
+ Scans all work queues for pending work. For projects with work but no session,
497
+ auto-creates a session. Then executes the next available work item via WorkExecutor.
498
+ """
499
+ # First pass: Auto-create and start sessions for projects with pending work
500
+ for project_id, queue in list(self.work_queues.items()):
501
+ logger.info(
502
+ f"Checking queue for {project_id}: pending={queue.pending_count}"
503
+ )
504
+ # Skip if no pending work
505
+ if queue.pending_count == 0:
506
+ continue
507
+
508
+ # Auto-create session if needed
509
+ if project_id not in self.sessions:
510
+ try:
511
+ logger.info(
512
+ f"Auto-creating session for project {project_id} with pending work"
513
+ )
514
+ session = self.get_or_create_session(project_id)
515
+
516
+ # Start the session so it's ready for work
517
+ if session.state.value == "idle":
518
+ logger.info(f"Auto-starting session for {project_id}")
519
+ await session.start()
520
+ except Exception as e:
521
+ logger.error(
522
+ f"Failed to auto-create/start session for {project_id}: {e}",
523
+ exc_info=True,
524
+ )
525
+ continue
526
+
527
+ # Second pass: Execute work for ready sessions
528
+ for project_id, session in list(self.sessions.items()):
529
+ # Skip sessions that aren't ready for work
530
+ if not session.is_ready():
531
+ continue
532
+
533
+ # Skip if no work queue exists
534
+ if project_id not in self.work_queues:
535
+ continue
536
+
537
+ # Get work executor for project
538
+ executor = self.work_executors.get(project_id)
539
+ if not executor:
540
+ logger.warning(
541
+ f"No work executor found for project {project_id}, skipping"
542
+ )
543
+ continue
544
+
545
+ # Check if there's work available
546
+ queue = self.work_queues[project_id]
547
+ if queue.pending_count == 0:
548
+ continue
549
+
550
+ # Try to execute next work item
551
+ try:
552
+ # Pass the session's active pane for execution
553
+ executed = await executor.execute_next(pane_target=session.active_pane)
554
+ if executed:
555
+ logger.info(f"Started work execution for project {project_id}")
556
+ except Exception as e:
557
+ logger.error(
558
+ f"Error executing work for project {project_id}: {e}",
559
+ exc_info=True,
560
+ )
561
+
432
562
 
433
563
  async def main(config: Optional[DaemonConfig] = None) -> None:
434
564
  """Main entry point for running the daemon.
"""Environment variable loader for Commander.

This module handles automatic loading of .env and .env.local files
at Commander startup. Environment files are loaded with the following precedence:
1. Existing environment variables (not overridden)
2. .env.local (local overrides)
3. .env (defaults)

Example:
    >>> from claude_mpm.commander.env_loader import load_env
    >>> load_env()
    # Automatically loads .env.local and .env from project root
"""

import logging
from pathlib import Path
from typing import Optional

from dotenv import load_dotenv

logger = logging.getLogger(__name__)


def load_env(project_root: Optional[Path] = None) -> None:
    """Load environment variables from .env and .env.local files.

    Searches for .env and .env.local in *project_root* (by default the
    repository root, i.e. the parent of src/claude_mpm). Files are loaded
    with override=False, meaning existing environment variables take
    precedence.

    Precedence (highest to lowest):
    1. Existing environment variables
    2. .env.local
    3. .env

    Args:
        project_root: Directory to search for env files. Defaults to the
            project root inferred from this file's location.

    Example:
        >>> load_env()
        # Loads .env.local and .env if they exist
    """
    if project_root is None:
        # Current file: src/claude_mpm/commander/env_loader.py
        # Project root: 4 parents up (commander -> claude_mpm -> src -> root).
        # NOTE(review): for a site-packages install this resolves relative to
        # the installed package, not a source checkout — confirm that is intended.
        project_root = Path(__file__).resolve().parent.parent.parent.parent

    # Load .env.local first: with override=False, values loaded earlier win,
    # which gives .env.local priority over .env.
    loaded_any = False
    for name in (".env.local", ".env"):
        env_path = project_root / name
        # Single stat per file (the original re-checked exists() afterwards).
        if env_path.exists():
            load_dotenv(env_path, override=False)
            logger.debug(f"Loaded environment from {env_path}")
            loaded_any = True

    # Log if neither file exists
    if not loaded_any:
        logger.debug("No .env or .env.local files found in project root")
@@ -0,0 +1,45 @@
1
+ """Conversation memory system for Commander.
2
+
3
+ This module provides semantic search, storage, and context compression
4
+ for all Claude Code instance conversations.
5
+
6
+ Key Components:
7
+ - ConversationStore: CRUD operations for conversations
8
+ - EmbeddingService: Generate vector embeddings
9
+ - SemanticSearch: Query conversations semantically
10
+ - ContextCompressor: Summarize conversations for context
11
+ - EntityExtractor: Extract files, functions, errors
12
+
13
+ Example:
14
+ >>> from claude_mpm.commander.memory import (
15
+ ... ConversationStore,
16
+ ... EmbeddingService,
17
+ ... SemanticSearch,
18
+ ... ContextCompressor,
19
+ ... )
20
+ >>> store = ConversationStore()
21
+ >>> embeddings = EmbeddingService()
22
+ >>> search = SemanticSearch(store, embeddings)
23
+ >>> results = await search.search("how did we fix the login bug?")
24
+ """
25
+
26
+ from .compression import ContextCompressor
27
+ from .embeddings import EmbeddingService
28
+ from .entities import Entity, EntityExtractor, EntityType
29
+ from .integration import MemoryIntegration
30
+ from .search import SearchResult, SemanticSearch
31
+ from .store import Conversation, ConversationMessage, ConversationStore
32
+
33
+ __all__ = [
34
+ "ContextCompressor",
35
+ "Conversation",
36
+ "ConversationMessage",
37
+ "ConversationStore",
38
+ "EmbeddingService",
39
+ "Entity",
40
+ "EntityExtractor",
41
+ "EntityType",
42
+ "MemoryIntegration",
43
+ "SearchResult",
44
+ "SemanticSearch",
45
+ ]
@@ -0,0 +1,347 @@
1
+ """Context compression and conversation summarization.
2
+
3
+ Compresses long conversations into concise summaries for efficient context
4
+ loading when resuming sessions or searching past work.
5
+ """
6
+
7
+ import logging
8
+ from typing import List, Optional
9
+
10
+ from ..llm.openrouter_client import OpenRouterClient
11
+ from .store import Conversation, ConversationMessage
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
class ContextCompressor:
    """Compress conversations into summaries for context loading.

    Uses cheap LLM (mistral-small) to generate summaries of conversations
    and compress multiple conversations into context strings.

    Attributes:
        client: OpenRouterClient for LLM requests
        summary_threshold: Minimum messages to trigger summarization
        max_context_tokens: Maximum tokens for compressed context

    Example:
        >>> compressor = ContextCompressor(client)
        >>> summary = await compressor.summarize(messages)
        >>> context = await compressor.compress_for_context(
        ...     conversations,
        ...     max_tokens=4000
        ... )
    """

    def __init__(
        self,
        client: OpenRouterClient,
        summary_threshold: int = 10,
        max_context_tokens: int = 4000,
    ):
        """Initialize context compressor.

        Args:
            client: OpenRouterClient for LLM requests
            summary_threshold: Minimum messages to summarize
            max_context_tokens: Maximum tokens for context string
        """
        self.client = client
        self.summary_threshold = summary_threshold
        self.max_context_tokens = max_context_tokens

        logger.info(
            "ContextCompressor initialized (threshold: %d msgs, max_tokens: %d)",
            summary_threshold,
            max_context_tokens,
        )

    async def summarize(
        self,
        messages: List[ConversationMessage],
        focus: Optional[str] = None,
    ) -> str:
        """Generate summary of conversation messages.

        Args:
            messages: List of messages to summarize
            focus: Optional focus area (e.g., "bug fixes", "API changes")

        Returns:
            Concise summary (2-4 sentences)

        Example:
            >>> summary = await compressor.summarize(messages)
            >>> print(summary)
            "Fixed login authentication bug in src/auth.py by updating token validation..."
        """
        if len(messages) < 2:
            # Too short to summarize; return the sole message verbatim
            # (empty string for an empty list).
            return messages[0].content if messages else ""

        # Build conversation text
        conversation_text = self._format_messages(messages)

        # Build summarization prompt (focused variant adds a files/errors item)
        if focus:
            prompt = f"""Summarize the following conversation, focusing on: {focus}

Conversation:
{conversation_text}

Provide a concise summary (2-4 sentences) that captures:
1. What was the main task or problem
2. What actions were taken
3. What was the outcome or current status
4. Any important files, functions, or errors mentioned

Summary:"""
        else:
            prompt = f"""Summarize the following conversation in 2-4 sentences.

Conversation:
{conversation_text}

Focus on:
1. What was the main task or problem
2. What actions were taken
3. What was the outcome or current status

Summary:"""

        messages_for_llm = [{"role": "user", "content": prompt}]

        system = (
            "You are a technical summarization assistant. "
            "Provide clear, concise summaries of development conversations. "
            "Focus on actionable information and key outcomes."
        )

        summary = await self.client.chat(messages_for_llm, system=system)
        logger.debug(
            "Generated summary (%d chars) from %d messages", len(summary), len(messages)
        )

        return summary.strip()

    async def compress_for_context(
        self,
        conversations: List[Conversation],
        max_tokens: Optional[int] = None,
        prioritize_recent: bool = True,
    ) -> str:
        """Compress multiple conversations into context string.

        Prioritizes recent conversations and uses summaries for older ones
        to fit within token budget.

        Args:
            conversations: List of conversations to compress
            max_tokens: Maximum tokens (default: self.max_context_tokens)
            prioritize_recent: Whether to prioritize recent conversations

        Returns:
            Compressed context string ready for LLM input

        Example:
            >>> context = await compressor.compress_for_context(
            ...     conversations,
            ...     max_tokens=4000
            ... )
            >>> print(f"Context: {len(context)} chars")
        """
        if max_tokens is None:
            max_tokens = self.max_context_tokens

        # Sort by recency if prioritizing
        if prioritize_recent:
            conversations = sorted(
                conversations, key=lambda c: c.updated_at, reverse=True
            )

        # Build context incrementally
        context_parts = []
        current_tokens = 0

        for conv in conversations:
            # Use summary if available, else generate one
            if conv.summary:
                summary_text = conv.summary
            elif len(conv.messages) >= self.summary_threshold:
                # Generate summary on-the-fly
                summary_text = await self.summarize(conv.messages)
            else:
                # Use full conversation for short ones
                summary_text = conv.get_full_text()

            # Format conversation section
            section = self._format_conversation_section(conv, summary_text)
            section_tokens = len(section) // 4  # Rough approximation (~4 chars/token)

            # Check if adding this would exceed budget
            if current_tokens + section_tokens > max_tokens:
                # Try to fit first sentence of the summary only
                short_summary = summary_text.split(". ")[0] + "."
                short_section = self._format_conversation_section(conv, short_summary)
                short_tokens = len(short_section) // 4

                if current_tokens + short_tokens <= max_tokens:
                    context_parts.append(short_section)
                    current_tokens += short_tokens
                else:
                    # Can't fit any more, stop
                    break
            else:
                context_parts.append(section)
                current_tokens += section_tokens

        context = "\n\n---\n\n".join(context_parts)

        logger.info(
            "Compressed %d conversations into context (%d chars, ~%d tokens)",
            len(context_parts),
            len(context),
            current_tokens,
        )

        return context

    def needs_summarization(self, messages: List[ConversationMessage]) -> bool:
        """Check if conversation needs summarization.

        Args:
            messages: List of messages to check

        Returns:
            True if message count exceeds threshold

        Example:
            >>> if compressor.needs_summarization(messages):
            ...     summary = await compressor.summarize(messages)
        """
        return len(messages) >= self.summary_threshold

    def _format_messages(
        self,
        messages: List[ConversationMessage],
        max_messages: Optional[int] = None,
    ) -> str:
        """Format messages as text for summarization.

        Args:
            messages: Messages to format
            max_messages: Maximum messages to include (takes the first N)

        Returns:
            Formatted conversation text
        """
        if max_messages:
            messages = messages[:max_messages]

        lines = []
        for msg in messages:
            # Format: ROLE: content
            lines.append(f"{msg.role.upper()}: {msg.content}")

        return "\n\n".join(lines)

    def _format_conversation_section(
        self, conversation: Conversation, summary: str
    ) -> str:
        """Format conversation section for context string.

        Args:
            conversation: Conversation to format
            summary: Summary or full text

        Returns:
            Formatted section with metadata
        """
        # Format timestamp
        timestamp = conversation.updated_at.strftime("%Y-%m-%d %H:%M")

        # Build markdown section (continuation lines intentionally flush-left:
        # they are inside the string literal, not code)
        return f"""## Conversation: {conversation.id}
**Project:** {conversation.project_id}
**Instance:** {conversation.instance_name}
**Updated:** {timestamp}
**Messages:** {conversation.message_count}

{summary}"""

    async def auto_summarize_conversation(
        self, conversation: Conversation
    ) -> Optional[str]:
        """Automatically summarize conversation if needed.

        Checks if conversation needs summarization and generates one if so.
        Updates the conversation's summary field but does NOT save to store.

        Args:
            conversation: Conversation to summarize

        Returns:
            Summary if generated, None if not needed

        Example:
            >>> summary = await compressor.auto_summarize_conversation(conv)
            >>> if summary:
            ...     conv.summary = summary
            ...     await store.save(conv)
        """
        if not self.needs_summarization(conversation.messages):
            logger.debug(
                "Conversation %s too short to summarize (%d messages)",
                conversation.id,
                len(conversation.messages),
            )
            return None

        if conversation.summary:
            # Existing summary is reused as-is; regeneration is handled by
            # update_summary_if_stale().
            logger.debug("Conversation %s already has summary", conversation.id)
            return conversation.summary

        # Generate summary
        summary = await self.summarize(conversation.messages)
        logger.info("Auto-generated summary for conversation %s", conversation.id)

        return summary

    async def update_summary_if_stale(
        self,
        conversation: Conversation,
        message_threshold: int = 5,
    ) -> Optional[str]:
        """Update summary if conversation has grown significantly.

        Args:
            conversation: Conversation to check
            message_threshold: New messages required to trigger update

        Returns:
            Updated summary if regenerated, None otherwise

        Example:
            >>> updated = await compressor.update_summary_if_stale(conv)
            >>> if updated:
            ...     conv.summary = updated
            ...     await store.save(conv)
        """
        if not conversation.summary:
            # No existing summary, generate one
            return await self.auto_summarize_conversation(conversation)

        # Check if conversation has grown significantly
        # (Simple heuristic: if more than threshold messages since last summarization)
        # In practice, you'd track when summary was generated
        if len(conversation.messages) < self.summary_threshold + message_threshold:
            return None

        # Regenerate summary
        logger.info(
            "Regenerating stale summary for conversation %s (%d messages)",
            conversation.id,
            len(conversation.messages),
        )

        return await self.summarize(conversation.messages)