claude-self-reflect 3.2.4 → 3.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/.claude/agents/claude-self-reflect-test.md +992 -510
  2. package/.claude/agents/reflection-specialist.md +59 -3
  3. package/README.md +14 -5
  4. package/installer/cli.js +16 -0
  5. package/installer/postinstall.js +14 -0
  6. package/installer/statusline-setup.js +289 -0
  7. package/mcp-server/run-mcp.sh +73 -5
  8. package/mcp-server/src/app_context.py +64 -0
  9. package/mcp-server/src/config.py +57 -0
  10. package/mcp-server/src/connection_pool.py +286 -0
  11. package/mcp-server/src/decay_manager.py +106 -0
  12. package/mcp-server/src/embedding_manager.py +64 -40
  13. package/mcp-server/src/embeddings_old.py +141 -0
  14. package/mcp-server/src/models.py +64 -0
  15. package/mcp-server/src/parallel_search.py +305 -0
  16. package/mcp-server/src/project_resolver.py +5 -0
  17. package/mcp-server/src/reflection_tools.py +211 -0
  18. package/mcp-server/src/rich_formatting.py +196 -0
  19. package/mcp-server/src/search_tools.py +874 -0
  20. package/mcp-server/src/server.py +127 -1720
  21. package/mcp-server/src/temporal_design.py +132 -0
  22. package/mcp-server/src/temporal_tools.py +604 -0
  23. package/mcp-server/src/temporal_utils.py +384 -0
  24. package/mcp-server/src/utils.py +150 -67
  25. package/package.json +15 -1
  26. package/scripts/add-timestamp-indexes.py +134 -0
  27. package/scripts/ast_grep_final_analyzer.py +325 -0
  28. package/scripts/ast_grep_unified_registry.py +556 -0
  29. package/scripts/check-collections.py +29 -0
  30. package/scripts/csr-status +366 -0
  31. package/scripts/debug-august-parsing.py +76 -0
  32. package/scripts/debug-import-single.py +91 -0
  33. package/scripts/debug-project-resolver.py +82 -0
  34. package/scripts/debug-temporal-tools.py +135 -0
  35. package/scripts/delta-metadata-update.py +547 -0
  36. package/scripts/import-conversations-unified.py +157 -25
  37. package/scripts/precompact-hook.sh +33 -0
  38. package/scripts/session_quality_tracker.py +481 -0
  39. package/scripts/streaming-watcher.py +1578 -0
  40. package/scripts/update_patterns.py +334 -0
  41. package/scripts/utils.py +39 -0
package/mcp-server/src/temporal_tools.py (new file, +604)
@@ -0,0 +1,604 @@
+ """
+ Temporal tools for Claude Self-Reflect MCP server.
+ Contains MCP tools for time-based queries and work session tracking.
+ """
+
+ import os
+ import logging
+ import hashlib
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any
+ from datetime import datetime, timezone, timedelta
+ from collections import Counter, defaultdict
+
+ from fastmcp import Context
+ from pydantic import Field
+ from qdrant_client import AsyncQdrantClient, models
+ from qdrant_client.models import OrderBy
+
+ from .temporal_utils import SessionDetector, TemporalParser, group_by_time_period
+ from .project_resolver import ProjectResolver
+
+ logger = logging.getLogger(__name__)
+
+
+ class TemporalTools:
+     """Temporal query tools for MCP server."""
+
+     def __init__(self,
+                  qdrant_client: AsyncQdrantClient,
+                  qdrant_url: str,
+                  get_all_collections_func,
+                  generate_embedding_func,
+                  initialize_embeddings_func,
+                  normalize_project_name_func):
+         """
+         Initialize temporal tools.
+
+         Args:
+             qdrant_client: Async Qdrant client
+             qdrant_url: Qdrant server URL
+             get_all_collections_func: Function to get all collections
+             generate_embedding_func: Function to generate embeddings
+             initialize_embeddings_func: Function to initialize embeddings
+             normalize_project_name_func: Function to normalize project names
+         """
+         self.qdrant_client = qdrant_client
+         self.qdrant_url = qdrant_url
+         self.get_all_collections = get_all_collections_func
+         self.generate_embedding = generate_embedding_func
+         self.initialize_embeddings = initialize_embeddings_func
+         self.normalize_project_name = normalize_project_name_func
+
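
All three tools below resolve the active project the same way when no `project` argument is given: they look for a `projects` segment in the MCP client's working directory (MCP_CLIENT_CWD) and take the following path component, falling back to the leaf directory name. A minimal standalone sketch of that path logic, with hypothetical sample paths:

    from pathlib import Path

    def resolve_project(cwd: str) -> str:
        # Mirrors the scope-detection logic used by the tools below.
        parts = Path(cwd).parts
        if 'projects' in parts:
            idx = parts.index('projects')
            if idx + 1 < len(parts):
                return parts[idx + 1]
        return Path(cwd).name  # Fallback: the leaf directory

    print(resolve_project('/Users/alice/projects/ShopifyMCPMockShop'))  # ShopifyMCPMockShop
    print(resolve_project('/tmp/scratch'))                              # scratch
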
+     async def get_recent_work(
+         self,
+         ctx: Context,
+         limit: int = Field(default=10, description="Number of recent conversations to return"),
+         project: Optional[str] = Field(default=None, description="Specific project or 'all' for cross-project"),
+         include_reflections: bool = Field(default=True, description="Include stored reflections"),
+         group_by: str = Field(default="conversation", description="Group by 'conversation', 'day', or 'session'")
+     ) -> str:
+         """Get recent work conversations to answer 'What did we work on last?' queries."""
+
+         await ctx.debug(f"Getting recent work: limit={limit}, project={project}, group_by={group_by}")
+
+         try:
+             # Determine project scope
+             target_project = project
+             if project is None:
+                 cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
+                 path_parts = Path(cwd).parts
+                 if 'projects' in path_parts:
+                     idx = path_parts.index('projects')
+                     if idx + 1 < len(path_parts):
+                         target_project = path_parts[idx + 1]
+                 if target_project is None:
+                     target_project = Path(cwd).name
+
+             # Get all collections
+             all_collections = await self.get_all_collections()
+             if not all_collections:
+                 return "<error>No conversation collections found. Please import conversations first.</error>"
+
+             # Filter collections by project
+             if target_project != 'all':
+                 # Use asyncio.to_thread to avoid blocking the event loop
+                 import asyncio
+                 from qdrant_client import QdrantClient as SyncQdrantClient
+
+                 def get_project_collections():
+                     sync_client = SyncQdrantClient(url=self.qdrant_url)
+                     resolver = ProjectResolver(sync_client)
+                     return resolver.find_collections_for_project(target_project)
+
+                 # Run the sync client in a thread pool so the event loop stays free
+                 project_collections = await asyncio.to_thread(get_project_collections)
+
+                 if not project_collections:
+                     normalized_name = self.normalize_project_name(target_project)
+                     project_hash = hashlib.md5(normalized_name.encode()).hexdigest()[:8]
+                     project_collections = [
+                         c for c in all_collections
+                         if c.startswith(f"conv_{project_hash}_")
+                     ]
+
+                 if include_reflections:
+                     reflections_collections = [c for c in all_collections if c.startswith('reflections')]
+                     collections_to_search = list(set(project_collections + reflections_collections))
+                 else:
+                     collections_to_search = project_collections
+             else:
+                 collections_to_search = all_collections
+                 if not include_reflections:
+                     collections_to_search = [c for c in collections_to_search if not c.startswith('reflections')]
+
+             await ctx.debug(f"Searching {len(collections_to_search)} collections for recent work")
+
+             # Collect recent chunks from all collections
+             all_chunks = []
+             for collection_name in collections_to_search:
+                 try:
+                     # Use the scroll API with native order_by for efficient timestamp sorting
+                     results, _ = await self.qdrant_client.scroll(
+                         collection_name=collection_name,
+                         limit=limit * 2,  # Fetch extra to allow for filtering
+                         with_payload=True,
+                         order_by=OrderBy(
+                             key="timestamp",
+                             direction="desc"  # Most recent first
+                         )  # Native Qdrant timestamp ordering
+                     )
+
+                     for point in results:
+                         if point.payload:
+                             chunk_data = {
+                                 'id': str(point.id),
+                                 'timestamp': point.payload.get('timestamp', datetime.now().isoformat()),
+                                 'conversation_id': point.payload.get('conversation_id', str(point.id)),
+                                 'project': point.payload.get('project', target_project),
+                                 'text': point.payload.get('text', ''),
+                                 'files_analyzed': point.payload.get('files_analyzed', []),
+                                 'concepts': point.payload.get('concepts', []),
+                                 'message_count': point.payload.get('total_messages', 1),
+                                 'chunk_index': point.payload.get('chunk_index', 0)
+                             }
+
+                             # Filter by project when searching all collections but the target is specific
+                             if target_project != 'all' and not project_collections:
+                                 # Match the target against the tail of the stored project path.
+                                 # Stored names look like "-Users-username-projects-ShopifyMCPMockShop";
+                                 # we want to match just "ShopifyMCPMockShop". Also handle
+                                 # underscore/dash variations (procsolve-website vs procsolve_website).
+                                 point_project = chunk_data['project']
+                                 normalized_target = target_project.replace('-', '_')
+                                 normalized_stored = point_project.replace('-', '_')
+                                 if not (normalized_stored.endswith(f"_{normalized_target}") or
+                                         normalized_stored == normalized_target or
+                                         point_project.endswith(f"-{target_project}") or
+                                         point_project == target_project):
+                                     continue
+
+                             all_chunks.append(chunk_data)
+
+                 except Exception as e:
+                     await ctx.debug(f"Error reading {collection_name}: {e}")
+                     continue
+
+             if not all_chunks:
+                 return "<no_results>No recent conversations found.</no_results>"
+
+             # Sort by timestamp (ISO-8601 strings sort chronologically)
+             parser = TemporalParser()
+             all_chunks.sort(key=lambda x: x['timestamp'], reverse=True)
+
+             # Apply grouping strategy
+             if group_by == "session":
+                 detector = SessionDetector()
+                 sessions = detector.detect_sessions(all_chunks[:limit * 3])  # Extra chunks improve session detection
+
+                 result = f"<recent_work sessions='{len(sessions[:limit])}'>\n"
+                 for session in sessions[:limit]:
+                     relative_time = parser.format_relative_time(session.start_time)
+                     result += f"  <session time='{relative_time}' duration='{session.duration_minutes}min' project='{session.project}'>\n"
+                     result += f"    <topics>{', '.join(session.main_topics[:5])}</topics>\n"
+                     if session.files_touched:
+                         result += f"    <files>{', '.join(session.files_touched[:5])}</files>\n"
+                     result += f"    <stats messages='{session.message_count}' conversations='{len(session.conversation_ids)}'/>\n"
+                     result += "  </session>\n"
+                 result += "</recent_work>"
+
+             elif group_by == "day":
+                 grouped = group_by_time_period(all_chunks[:limit * 2], granularity='day')
+
+                 result = f"<recent_work days='{len(grouped)}'>\n"
+                 for day_key in sorted(grouped.keys(), reverse=True)[:limit]:
+                     day_chunks = grouped[day_key]
+
+                     # Aggregate day statistics
+                     projects = list(set(c['project'] for c in day_chunks))
+                     concepts = []
+                     files = []
+                     for chunk in day_chunks:
+                         concepts.extend(chunk.get('concepts', []))
+                         files.extend(chunk.get('files_analyzed', []))
+
+                     # Get the most common concepts
+                     concept_counts = Counter(concepts)
+                     top_concepts = [c for c, _ in concept_counts.most_common(5)]
+
+                     result += f"  <day date='{day_key}' conversations='{len(day_chunks)}'>\n"
+                     result += f"    <projects>{', '.join(projects)}</projects>\n"
+                     result += f"    <topics>{', '.join(top_concepts)}</topics>\n"
+                     if files:
+                         unique_files = list(set(files))[:5]
+                         result += f"    <files>{', '.join(unique_files)}</files>\n"
+                     result += "  </day>\n"
+                 result += "</recent_work>"
+
+             else:  # Default: group by conversation
+                 # Group chunks by conversation_id
+                 conversations = {}
+                 for chunk in all_chunks:
+                     conv_id = chunk.get('conversation_id')
+                     if conv_id not in conversations:
+                         conversations[conv_id] = []
+                     conversations[conv_id].append(chunk)
+
+                 # Sort conversations by their most recent chunk
+                 sorted_convs = sorted(
+                     conversations.items(),
+                     key=lambda x: max(c['timestamp'] for c in x[1]),
+                     reverse=True
+                 )
+
+                 result = f"<recent_work conversations='{min(len(sorted_convs), limit)}'>\n"
+                 for conv_id, chunks in sorted_convs[:limit]:
+                     most_recent = max(chunks, key=lambda x: x['timestamp'])
+                     relative_time = parser.format_relative_time(most_recent['timestamp'])
+
+                     # Get a conversation preview
+                     text_preview = most_recent.get('text', '')[:200]
+                     if len(most_recent.get('text', '')) > 200:
+                         text_preview += '...'
+
+                     result += f"  <conversation id='{conv_id}' time='{relative_time}' project='{most_recent['project']}'>\n"
+                     result += f"    <preview>{text_preview}</preview>\n"
+
+                     # Aggregate concepts and files
+                     all_concepts = []
+                     all_files = []
+                     for chunk in chunks:
+                         all_concepts.extend(chunk.get('concepts', []))
+                         all_files.extend(chunk.get('files_analyzed', []))
+
+                     if all_concepts:
+                         unique_concepts = list(set(all_concepts))[:5]
+                         result += f"    <topics>{', '.join(unique_concepts)}</topics>\n"
+                     if all_files:
+                         unique_files = list(set(all_files))[:3]
+                         result += f"    <files>{', '.join(unique_files)}</files>\n"
+
+                     result += "  </conversation>\n"
+
+                 result += "</recent_work>"
+
+             return result
+
+         except Exception as e:
+             logger.error(f"Error in get_recent_work: {e}", exc_info=True)
+             return f"<error>Failed to get recent work: {str(e)}</error>"
+
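
When ProjectResolver finds nothing, get_recent_work falls back to a naming convention: collection names start with `conv_` plus the first 8 hex characters of the MD5 of the normalized project name. A small illustration of that convention; the lowercase/underscore normalization shown here is an assumption standing in for the injected normalize_project_name_func:

    import hashlib

    def collection_prefix(project_name: str) -> str:
        normalized = project_name.lower().replace('-', '_')  # assumed normalization
        return f"conv_{hashlib.md5(normalized.encode()).hexdigest()[:8]}_"

    print(collection_prefix('procsolve-website'))  # conv_<8 hex chars>_
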
+     async def search_by_recency(
+         self,
+         ctx: Context,
+         query: str = Field(description="Semantic search query"),
+         time_range: Optional[str] = Field(default=None, description="Natural language time like 'last week', 'yesterday'"),
+         since: Optional[str] = Field(default=None, description="ISO timestamp or relative time"),
+         until: Optional[str] = Field(default=None, description="ISO timestamp or relative time"),
+         limit: int = Field(default=10, description="Maximum number of results"),
+         min_score: float = Field(default=0.3, description="Minimum similarity score"),
+         project: Optional[str] = Field(default=None, description="Specific project or 'all'")
+     ) -> str:
+         """Time-constrained semantic search for queries like 'docker issues last week'."""
+
+         await ctx.debug(f"Search by recency: query='{query}', time_range='{time_range}'")
+
+         try:
+             # Parse time constraints
+             parser = TemporalParser()
+
+             if time_range:
+                 start_time, end_time = parser.parse_time_expression(time_range)
+             elif since or until:
+                 if since:
+                     start_time, _ = parser.parse_time_expression(since) if isinstance(since, str) else (since, since)
+                 else:
+                     start_time = datetime.now(timezone.utc) - timedelta(days=30)  # Default: 30 days back
+
+                 if until:
+                     _, end_time = parser.parse_time_expression(until) if isinstance(until, str) else (until, until)
+                 else:
+                     end_time = datetime.now(timezone.utc)
+             else:
+                 # Default to the last 7 days
+                 start_time = datetime.now(timezone.utc) - timedelta(days=7)
+                 end_time = datetime.now(timezone.utc)
+
+             await ctx.debug(f"Time range: {start_time.isoformat()} to {end_time.isoformat()}")
+
+             # Build the Qdrant filter using DatetimeRange (v1.8.0+)
+             time_filter = models.Filter(
+                 must=[
+                     models.FieldCondition(
+                         key="timestamp",
+                         range=models.DatetimeRange(
+                             gte=start_time.isoformat(),
+                             lte=end_time.isoformat()
+                         )
+                     )
+                 ]
+             )
+
+             # Ensure the embedding models are initialized
+             if not self.initialize_embeddings():
+                 return "<error>Failed to initialize embedding models</error>"
+
+             # Query embeddings are generated lazily per collection type and cached
+             query_embeddings = {}
+
+             # Determine collections to search
+             target_project = project
+             if project is None:
+                 cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
+                 path_parts = Path(cwd).parts
+                 if 'projects' in path_parts:
+                     idx = path_parts.index('projects')
+                     if idx + 1 < len(path_parts):
+                         target_project = path_parts[idx + 1]
+                 if target_project is None:
+                     target_project = Path(cwd).name
+
+             # Get collections
+             all_collections = await self.get_all_collections()
+             if target_project != 'all':
+                 from qdrant_client import QdrantClient as SyncQdrantClient
+                 sync_client = SyncQdrantClient(url=self.qdrant_url)
+                 resolver = ProjectResolver(sync_client)
+                 collections_to_search = resolver.find_collections_for_project(target_project)
+             else:
+                 collections_to_search = all_collections
+
+             # Search each collection with the time filter
+             all_results = []
+             for collection_name in collections_to_search:
+                 try:
+                     # Determine the embedding type for this collection
+                     is_local = collection_name.endswith('_local')
+                     embedding_type = 'local' if is_local else 'voyage'
+
+                     # Generate or retrieve the cached embedding for this type
+                     if embedding_type not in query_embeddings:
+                         try:
+                             query_embeddings[embedding_type] = await self.generate_embedding(query, force_type=embedding_type)
+                         except Exception as e:
+                             await ctx.debug(f"Failed to generate {embedding_type} embedding: {e}")
+                             continue
+
+                     collection_embedding = query_embeddings[embedding_type]
+
+                     results = await self.qdrant_client.search(
+                         collection_name=collection_name,
+                         query_vector=collection_embedding,
+                         query_filter=time_filter,
+                         limit=limit,
+                         score_threshold=min_score,
+                         with_payload=True
+                     )
+
+                     for point in results:
+                         result_data = {
+                             'score': point.score,
+                             'timestamp': point.payload.get('timestamp'),
+                             'text': point.payload.get('text', ''),
+                             'project': point.payload.get('project'),
+                             'conversation_id': point.payload.get('conversation_id'),
+                             'files_analyzed': point.payload.get('files_analyzed', []),
+                             'concepts': point.payload.get('concepts', [])
+                         }
+                         all_results.append(result_data)
+
+                 except Exception as e:
+                     await ctx.debug(f"Error searching {collection_name}: {e}")
+                     continue
+
+             # Sort by score
+             all_results.sort(key=lambda x: x['score'], reverse=True)
+             all_results = all_results[:limit]
+
+             if not all_results:
+                 return f"<no_results>No results found for '{query}' in the specified time range.</no_results>"
+
+             # Format results
+             result = f"<search_results query='{query}' time_range='{start_time.date()} to {end_time.date()}' count='{len(all_results)}'>\n"
+
+             for idx, res in enumerate(all_results, 1):
+                 relative_time = parser.format_relative_time(res['timestamp'])
+                 text_preview = res['text'][:200] + '...' if len(res['text']) > 200 else res['text']
+
+                 result += f"  <result rank='{idx}' score='{res['score']:.3f}' time='{relative_time}'>\n"
+                 result += f"    <preview>{text_preview}</preview>\n"
+
+                 if res.get('concepts'):
+                     result += f"    <topics>{', '.join(res['concepts'][:5])}</topics>\n"
+                 if res.get('files_analyzed'):
+                     result += f"    <files>{', '.join(res['files_analyzed'][:3])}</files>\n"
+
+                 result += f"    <conversation_id>{res['conversation_id']}</conversation_id>\n"
+                 result += "  </result>\n"
+
+             result += "</search_results>"
+             return result
+
+         except Exception as e:
+             logger.error(f"Error in search_by_recency: {e}", exc_info=True)
+             return f"<error>Search failed: {str(e)}</error>"
+
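
The time window above is enforced server-side via `models.DatetimeRange`, which requires Qdrant 1.8.0 or newer (as the inline comments note). A standalone sketch of the filter this method builds, using its default 7-day window:

    from datetime import datetime, timezone, timedelta
    from qdrant_client import models

    end_time = datetime.now(timezone.utc)
    start_time = end_time - timedelta(days=7)  # search_by_recency's default window

    time_filter = models.Filter(
        must=[
            models.FieldCondition(
                key="timestamp",
                range=models.DatetimeRange(
                    gte=start_time.isoformat(),
                    lte=end_time.isoformat()
                )
            )
        ]
    )
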
+     async def get_timeline(
+         self,
+         ctx: Context,
+         time_range: str = Field(default="last week", description="Natural language time range"),
+         project: Optional[str] = Field(default=None, description="Specific project or 'all'"),
+         granularity: str = Field(default="day", description="'hour', 'day', 'week', or 'month'"),
+         include_stats: bool = Field(default=True, description="Include activity statistics")
+     ) -> str:
+         """Show activity timeline for a project or across all projects."""
+
+         await ctx.debug(f"Getting timeline: time_range='{time_range}', granularity='{granularity}'")
+
+         try:
+             # Parse time range
+             parser = TemporalParser()
+             start_time, end_time = parser.parse_time_expression(time_range)
+
+             # Determine project scope
+             target_project = project
+             if project is None:
+                 cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
+                 path_parts = Path(cwd).parts
+                 if 'projects' in path_parts:
+                     idx = path_parts.index('projects')
+                     if idx + 1 < len(path_parts):
+                         target_project = path_parts[idx + 1]
+                 if target_project is None:
+                     target_project = Path(cwd).name
+
+             # Get collections
+             all_collections = await self.get_all_collections()
+             if target_project != 'all':
+                 from qdrant_client import QdrantClient as SyncQdrantClient
+                 sync_client = SyncQdrantClient(url=self.qdrant_url)
+                 resolver = ProjectResolver(sync_client)
+                 collections_to_search = resolver.find_collections_for_project(target_project)
+             else:
+                 collections_to_search = all_collections
+
+             # Build time filter using DatetimeRange (v1.8.0+)
+             time_filter = models.Filter(
+                 must=[
+                     models.FieldCondition(
+                         key="timestamp",
+                         range=models.DatetimeRange(
+                             gte=start_time.isoformat(),
+                             lte=end_time.isoformat()
+                         )
+                     )
+                 ]
+             )
+
+             # Collect all chunks in time range
+             all_chunks = []
+             for collection_name in collections_to_search:
+                 try:
+                     # Use scroll with native order_by and time filter for efficient retrieval
+                     results, _ = await self.qdrant_client.scroll(
+                         collection_name=collection_name,
+                         scroll_filter=time_filter,
+                         limit=1000,  # Get many items for timeline
+                         with_payload=True,
+                         order_by=OrderBy(
+                             key="timestamp",
+                             direction="desc"  # Most recent first
+                         )  # Native Qdrant timestamp ordering
+                     )
+
+                     for point in results:
+                         if point.payload:
+                             chunk_data = {
+                                 'timestamp': point.payload.get('timestamp'),
+                                 'project': point.payload.get('project', target_project),
+                                 'conversation_id': point.payload.get('conversation_id'),
+                                 'files_analyzed': point.payload.get('files_analyzed', []),
+                                 'files_edited': point.payload.get('files_edited', []),
+                                 'concepts': point.payload.get('concepts', []),
+                                 'tools_used': point.payload.get('tools_used', []),
+                                 'message_count': point.payload.get('total_messages', 1)
+                             }
+                             all_chunks.append(chunk_data)
+
+                 except Exception as e:
+                     await ctx.debug(f"Error reading {collection_name}: {e}")
+                     continue
+
+             if not all_chunks:
+                 return "<timeline>No activity found in the specified time range.</timeline>"
+
+             # Group by time period
+             grouped = group_by_time_period(all_chunks, granularity=granularity)
+
+             # Generate timeline
+             result = f"<timeline range='{start_time.date()} to {end_time.date()}' periods='{len(grouped)}'>\n"
+
+             for period_key in sorted(grouped.keys()):
+                 period_chunks = grouped[period_key]
+
+                 result += f"  <period key='{period_key}' conversations='{len(period_chunks)}'>\n"
+
+                 if include_stats:
+                     # Calculate statistics
+                     all_files = []
+                     all_concepts = []
+                     all_tools = []
+                     message_count = 0
+
+                     for chunk in period_chunks:
+                         all_files.extend(chunk.get('files_analyzed', []))
+                         all_files.extend(chunk.get('files_edited', []))
+                         all_concepts.extend(chunk.get('concepts', []))
+                         all_tools.extend(chunk.get('tools_used', []))
+                         message_count += chunk.get('message_count', 1)
+
+                     # Get unique counts and top items
+                     file_counts = Counter(all_files)
+                     concept_counts = Counter(all_concepts)
+                     tool_counts = Counter(all_tools)
+
+                     result += f"    <stats messages='{message_count}' unique_files='{len(file_counts)}' unique_concepts='{len(concept_counts)}'/>\n"
+
+                     if concept_counts:
+                         top_concepts = [c for c, _ in concept_counts.most_common(5)]
+                         result += f"    <top_topics>{', '.join(top_concepts)}</top_topics>\n"
+
+                     if file_counts:
+                         top_files = [f for f, _ in file_counts.most_common(3)]
+                         result += f"    <active_files>{', '.join(top_files)}</active_files>\n"
+
+                 result += "  </period>\n"
+
+             result += "</timeline>"
+             return result
+
+         except Exception as e:
+             logger.error(f"Error in get_timeline: {e}", exc_info=True)
+             return f"<error>Failed to generate timeline: {str(e)}</error>"
+
+
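
One operational caveat: Qdrant's scroll `order_by` (used in get_recent_work and get_timeline above) only works on payload fields that have a payload index, which is presumably why this release also ships scripts/add-timestamp-indexes.py. A minimal sketch of creating such an index, assuming a local Qdrant instance and a hypothetical collection name:

    from qdrant_client import QdrantClient, models

    client = QdrantClient(url="http://localhost:6333")  # assumed local instance
    client.create_payload_index(
        collection_name="conv_0123abcd_local",  # hypothetical collection name
        field_name="timestamp",
        field_schema=models.PayloadSchemaType.DATETIME,  # enables order_by; also helps DatetimeRange filters
    )
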
+ def register_temporal_tools(
+     mcp,
+     qdrant_client,
+     qdrant_url,
+     get_all_collections_func,
+     generate_embedding_func,
+     initialize_embeddings_func,
+     normalize_project_name_func
+ ):
+     """
+     Register temporal tools with the MCP server.
+
+     Args:
+         mcp: FastMCP instance
+         qdrant_client: Async Qdrant client
+         qdrant_url: Qdrant server URL
+         get_all_collections_func: Function to get all collections
+         generate_embedding_func: Function to generate embeddings
+         initialize_embeddings_func: Function to initialize embeddings
+         normalize_project_name_func: Function to normalize project names
+
+     Returns:
+         TemporalTools instance
+     """
+     # Create temporal tools instance with the provided functions
+     tools = TemporalTools(
+         qdrant_client=qdrant_client,
+         qdrant_url=qdrant_url,
+         get_all_collections_func=get_all_collections_func,
+         generate_embedding_func=generate_embedding_func,
+         initialize_embeddings_func=initialize_embeddings_func,
+         normalize_project_name_func=normalize_project_name_func
+     )
+
+     # Register the tools with MCP
+     mcp.tool()(tools.get_recent_work)
+     mcp.tool()(tools.search_by_recency)
+     mcp.tool()(tools.get_timeline)
+
+     return tools
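
For orientation, wiring these tools into a server could look roughly like the sketch below. The helper callables are placeholders; in this package the real ones come from server.py and the new app_context.py, and their exact signatures are assumptions here:

    from fastmcp import FastMCP
    from qdrant_client import AsyncQdrantClient
    from temporal_tools import register_temporal_tools  # .temporal_tools inside the package

    mcp = FastMCP("claude-self-reflect")
    client = AsyncQdrantClient(url="http://localhost:6333")

    # Placeholder callables; real implementations are provided by the host server.
    async def get_all_collections() -> list[str]: ...
    async def generate_embedding(text: str, force_type: str | None = None) -> list[float]: ...
    def initialize_embeddings() -> bool: ...
    def normalize_project_name(name: str) -> str: ...

    temporal = register_temporal_tools(
        mcp,
        qdrant_client=client,
        qdrant_url="http://localhost:6333",
        get_all_collections_func=get_all_collections,
        generate_embedding_func=generate_embedding,
        initialize_embeddings_func=initialize_embeddings,
        normalize_project_name_func=normalize_project_name,
    )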