claude-self-reflect 3.2.4 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/.claude/agents/claude-self-reflect-test.md +595 -528
  2. package/.claude/agents/reflection-specialist.md +59 -3
  3. package/README.md +14 -5
  4. package/mcp-server/run-mcp.sh +49 -5
  5. package/mcp-server/src/app_context.py +64 -0
  6. package/mcp-server/src/config.py +57 -0
  7. package/mcp-server/src/connection_pool.py +286 -0
  8. package/mcp-server/src/decay_manager.py +106 -0
  9. package/mcp-server/src/embedding_manager.py +64 -40
  10. package/mcp-server/src/embeddings_old.py +141 -0
  11. package/mcp-server/src/models.py +64 -0
  12. package/mcp-server/src/parallel_search.py +371 -0
  13. package/mcp-server/src/project_resolver.py +5 -0
  14. package/mcp-server/src/reflection_tools.py +206 -0
  15. package/mcp-server/src/rich_formatting.py +196 -0
  16. package/mcp-server/src/search_tools.py +826 -0
  17. package/mcp-server/src/server.py +127 -1720
  18. package/mcp-server/src/temporal_design.py +132 -0
  19. package/mcp-server/src/temporal_tools.py +597 -0
  20. package/mcp-server/src/temporal_utils.py +384 -0
  21. package/mcp-server/src/utils.py +150 -67
  22. package/package.json +10 -1
  23. package/scripts/add-timestamp-indexes.py +134 -0
  24. package/scripts/check-collections.py +29 -0
  25. package/scripts/debug-august-parsing.py +76 -0
  26. package/scripts/debug-import-single.py +91 -0
  27. package/scripts/debug-project-resolver.py +82 -0
  28. package/scripts/debug-temporal-tools.py +135 -0
  29. package/scripts/delta-metadata-update.py +547 -0
  30. package/scripts/import-conversations-unified.py +53 -2
  31. package/scripts/precompact-hook.sh +33 -0
  32. package/scripts/streaming-watcher.py +1443 -0
  33. package/scripts/utils.py +39 -0
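--- /dev/null
+++ package/mcp-server/src/temporal_tools.py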
@@ -0,0 +1,597 @@
+ """
+ Temporal tools for Claude Self-Reflect MCP server.
+ Contains MCP tools for time-based queries and work session tracking.
+ """
+
+ import os
+ import logging
+ import hashlib
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any
+ from datetime import datetime, timezone, timedelta
+ from collections import Counter, defaultdict
+
+ from fastmcp import Context
+ from pydantic import Field
+ from qdrant_client import AsyncQdrantClient, models
+ from qdrant_client.models import OrderBy
+
+ from .temporal_utils import SessionDetector, TemporalParser, group_by_time_period
+ from .project_resolver import ProjectResolver
+
+ logger = logging.getLogger(__name__)
+
+
+ class TemporalTools:
+     """Temporal query tools for MCP server."""
+
+     def __init__(self,
+                  qdrant_client: AsyncQdrantClient,
+                  qdrant_url: str,
+                  get_all_collections_func,
+                  generate_embedding_func,
+                  initialize_embeddings_func,
+                  normalize_project_name_func):
+         """
+         Initialize temporal tools.
+
+         Args:
+             qdrant_client: Async Qdrant client
+             qdrant_url: Qdrant server URL
+             get_all_collections_func: Function to get all collections
+             generate_embedding_func: Function to generate embeddings
+             initialize_embeddings_func: Function to initialize embeddings
+             normalize_project_name_func: Function to normalize project names
+         """
+         self.qdrant_client = qdrant_client
+         self.qdrant_url = qdrant_url
+         self.get_all_collections = get_all_collections_func
+         self.generate_embedding = generate_embedding_func
+         self.initialize_embeddings = initialize_embeddings_func
+         self.normalize_project_name = normalize_project_name_func
+
+     async def get_recent_work(
+         self,
+         ctx: Context,
+         limit: int = Field(default=10, description="Number of recent conversations to return"),
+         project: Optional[str] = Field(default=None, description="Specific project or 'all' for cross-project"),
+         include_reflections: bool = Field(default=True, description="Include stored reflections"),
+         group_by: str = Field(default="conversation", description="Group by 'conversation', 'day', or 'session'")
+     ) -> str:
+         """Get recent work conversations to answer 'What did we work on last?' queries."""
+
+         await ctx.debug(f"Getting recent work: limit={limit}, project={project}, group_by={group_by}")
+
+         try:
+             # Determine project scope
+             target_project = project
+             if project is None:
+                 cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
+                 path_parts = Path(cwd).parts
+                 if 'projects' in path_parts:
+                     idx = path_parts.index('projects')
+                     if idx + 1 < len(path_parts):
+                         target_project = path_parts[idx + 1]
+                 if target_project is None:
+                     target_project = Path(cwd).name
+
+             # Get all collections
+             all_collections = await self.get_all_collections()
+             if not all_collections:
+                 return "<error>No conversation collections found. Please import conversations first.</error>"
+
+             # Filter collections by project
+             if target_project != 'all':
+                 from qdrant_client import QdrantClient as SyncQdrantClient
+                 sync_client = SyncQdrantClient(url=self.qdrant_url)
+                 resolver = ProjectResolver(sync_client)
+                 project_collections = resolver.find_collections_for_project(target_project)
+
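+                 # Fallback when the resolver finds nothing: conversation collections
+                 # appear to be named conv_<md5-8-of-normalized-name>_<suffix>, so the
+                 # prefix is rebuilt here (assumed to mirror the importer's scheme).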
+                 if not project_collections:
+                     normalized_name = self.normalize_project_name(target_project)
+                     project_hash = hashlib.md5(normalized_name.encode()).hexdigest()[:8]
+                     project_collections = [
+                         c for c in all_collections
+                         if c.startswith(f"conv_{project_hash}_")
+                     ]
+
+                 if include_reflections:
+                     reflections_collections = [c for c in all_collections if c.startswith('reflections')]
+                     collections_to_search = list(set(project_collections + reflections_collections))
+                 else:
+                     collections_to_search = project_collections
+             else:
+                 collections_to_search = all_collections
+                 if not include_reflections:
+                     collections_to_search = [c for c in collections_to_search if not c.startswith('reflections')]
+
+             await ctx.debug(f"Searching {len(collections_to_search)} collections for recent work")
+
+             # Collect recent chunks from all collections
+             all_chunks = []
+             for collection_name in collections_to_search:
+                 try:
+                     # Use scroll API with native order_by for efficient timestamp sorting
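+                     # NOTE: Qdrant only honors order_by when the target field has a
+                     # payload index; scripts/add-timestamp-indexes.py (added in this
+                     # release) appears to create the "timestamp" index relied on here.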
+                     results, _ = await self.qdrant_client.scroll(
+                         collection_name=collection_name,
+                         limit=limit * 2,  # Get more to allow for filtering
+                         with_payload=True,
+                         order_by=OrderBy(
+                             key="timestamp",
+                             direction="desc"  # Most recent first
+                         )  # Native Qdrant timestamp ordering
+                     )
+
+                     for point in results:
+                         if point.payload:
+                             chunk_data = {
+                                 'id': str(point.id),
+                                 # tz-aware fallback keeps ISO strings comparable when sorting
+                                 'timestamp': point.payload.get('timestamp', datetime.now(timezone.utc).isoformat()),
+                                 'conversation_id': point.payload.get('conversation_id', str(point.id)),
+                                 'project': point.payload.get('project', target_project),
+                                 'text': point.payload.get('text', ''),
+                                 'files_analyzed': point.payload.get('files_analyzed', []),
+                                 'concepts': point.payload.get('concepts', []),
+                                 'message_count': point.payload.get('total_messages', 1),
+                                 'chunk_index': point.payload.get('chunk_index', 0)
+                             }
+
+                             # Filter by project if searching all collections but target is specific
+                             if target_project != 'all' and not project_collections:
+                                 # Handle project matching - check if the target project name appears
+                                 # at the end of the stored project path. The stored project name is
+                                 # like "-Users-username-projects-ShopifyMCPMockShop" and we want to
+                                 # match just "ShopifyMCPMockShop". Also handle underscore/dash
+                                 # variations (procsolve-website vs procsolve_website).
+                                 point_project = chunk_data['project']
+                                 normalized_target = target_project.replace('-', '_')
+                                 normalized_stored = point_project.replace('-', '_')
+                                 if not (normalized_stored.endswith(f"_{normalized_target}") or
+                                         normalized_stored == normalized_target or
+                                         point_project.endswith(f"-{target_project}") or
+                                         point_project == target_project):
+                                     continue
+
+                             all_chunks.append(chunk_data)
+
+                 except Exception as e:
+                     await ctx.debug(f"Error reading {collection_name}: {e}")
+                     continue
+
+             if not all_chunks:
+                 return "<no_results>No recent conversations found.</no_results>"
+
+             # Sort by timestamp
+             parser = TemporalParser()
+             all_chunks.sort(key=lambda x: x['timestamp'], reverse=True)
+
+             # Apply grouping strategy
+             if group_by == "session":
+                 detector = SessionDetector()
+                 sessions = detector.detect_sessions(all_chunks[:limit * 3])  # Get more chunks for session detection
+
+                 result = f"<recent_work sessions='{len(sessions[:limit])}'>\n"
+                 for session in sessions[:limit]:
+                     relative_time = parser.format_relative_time(session.start_time)
+                     result += f"  <session time='{relative_time}' duration='{session.duration_minutes}min' project='{session.project}'>\n"
+                     result += f"    <topics>{', '.join(session.main_topics[:5])}</topics>\n"
+                     if session.files_touched:
+                         result += f"    <files>{', '.join(session.files_touched[:5])}</files>\n"
+                     result += f"    <stats messages='{session.message_count}' conversations='{len(session.conversation_ids)}'/>\n"
+                     result += f"  </session>\n"
+                 result += "</recent_work>"
+
+             elif group_by == "day":
+                 grouped = group_by_time_period(all_chunks[:limit * 2], granularity='day')
+
+                 result = f"<recent_work days='{len(grouped)}'>\n"
+                 for day_key in sorted(grouped.keys(), reverse=True)[:limit]:
+                     day_chunks = grouped[day_key]
+
+                     # Aggregate day statistics
+                     projects = list(set(c['project'] for c in day_chunks))
+                     concepts = []
+                     files = []
+                     for chunk in day_chunks:
+                         concepts.extend(chunk.get('concepts', []))
+                         files.extend(chunk.get('files_analyzed', []))
+
+                     # Get most common concepts
+                     concept_counts = Counter(concepts)
+                     top_concepts = [c for c, _ in concept_counts.most_common(5)]
+
+                     result += f"  <day date='{day_key}' conversations='{len(day_chunks)}'>\n"
+                     result += f"    <projects>{', '.join(projects)}</projects>\n"
+                     result += f"    <topics>{', '.join(top_concepts)}</topics>\n"
+                     if files:
+                         unique_files = list(set(files))[:5]
+                         result += f"    <files>{', '.join(unique_files)}</files>\n"
+                     result += f"  </day>\n"
+                 result += "</recent_work>"
+
+             else:  # Default: group by conversation
+                 # Group chunks by conversation_id
+                 conversations = {}
+                 for chunk in all_chunks:
+                     conv_id = chunk.get('conversation_id')
+                     if conv_id not in conversations:
+                         conversations[conv_id] = []
+                     conversations[conv_id].append(chunk)
+
+                 # Sort conversations by most recent chunk
+                 sorted_convs = sorted(
+                     conversations.items(),
+                     key=lambda x: max(c['timestamp'] for c in x[1]),
+                     reverse=True
+                 )
+
+                 result = f"<recent_work conversations='{min(len(sorted_convs), limit)}'>\n"
+                 for conv_id, chunks in sorted_convs[:limit]:
+                     most_recent = max(chunks, key=lambda x: x['timestamp'])
+                     relative_time = parser.format_relative_time(most_recent['timestamp'])
+
+                     # Get conversation summary
+                     text_preview = most_recent.get('text', '')[:200]
+                     if len(most_recent.get('text', '')) > 200:
+                         text_preview += '...'
+
+                     result += f"  <conversation id='{conv_id}' time='{relative_time}' project='{most_recent['project']}'>\n"
+                     result += f"    <preview>{text_preview}</preview>\n"
+
+                     # Aggregate concepts and files
+                     all_concepts = []
+                     all_files = []
+                     for chunk in chunks:
+                         all_concepts.extend(chunk.get('concepts', []))
+                         all_files.extend(chunk.get('files_analyzed', []))
+
+                     if all_concepts:
+                         unique_concepts = list(set(all_concepts))[:5]
+                         result += f"    <topics>{', '.join(unique_concepts)}</topics>\n"
+                     if all_files:
+                         unique_files = list(set(all_files))[:3]
+                         result += f"    <files>{', '.join(unique_files)}</files>\n"
+
+                     result += f"  </conversation>\n"
+
+                 result += "</recent_work>"
+
+             return result
+
+         except Exception as e:
+             logger.error(f"Error in get_recent_work: {e}", exc_info=True)
+             return f"<error>Failed to get recent work: {str(e)}</error>"
+
+     async def search_by_recency(
+         self,
+         ctx: Context,
+         query: str = Field(description="Semantic search query"),
+         time_range: Optional[str] = Field(default=None, description="Natural language time like 'last week', 'yesterday'"),
+         since: Optional[str] = Field(default=None, description="ISO timestamp or relative time"),
+         until: Optional[str] = Field(default=None, description="ISO timestamp or relative time"),
+         limit: int = Field(default=10, description="Maximum number of results"),
+         min_score: float = Field(default=0.3, description="Minimum similarity score"),
+         project: Optional[str] = Field(default=None, description="Specific project or 'all'")
+     ) -> str:
+         """Time-constrained semantic search for queries like 'docker issues last week'."""
+
+         await ctx.debug(f"Search by recency: query='{query}', time_range='{time_range}'")
+
+         try:
+             # Parse time constraints
+             parser = TemporalParser()
+
+             if time_range:
+                 start_time, end_time = parser.parse_time_expression(time_range)
+             elif since or until:
+                 if since:
+                     start_time, _ = parser.parse_time_expression(since) if isinstance(since, str) else (since, since)
+                 else:
+                     start_time = datetime.now(timezone.utc) - timedelta(days=30)  # Default 30 days back
+
+                 if until:
+                     _, end_time = parser.parse_time_expression(until) if isinstance(until, str) else (until, until)
+                 else:
+                     end_time = datetime.now(timezone.utc)
+             else:
+                 # Default to last 7 days
+                 start_time = datetime.now(timezone.utc) - timedelta(days=7)
+                 end_time = datetime.now(timezone.utc)
+
+             await ctx.debug(f"Time range: {start_time.isoformat()} to {end_time.isoformat()}")
+
+             # Build filter for Qdrant using DatetimeRange (v1.8.0+)
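+             # Assumes stored "timestamp" payload values are RFC 3339 / ISO-8601
+             # strings, which is what DatetimeRange compares against.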
+             time_filter = models.Filter(
+                 must=[
+                     models.FieldCondition(
+                         key="timestamp",
+                         range=models.DatetimeRange(
+                             gte=start_time.isoformat(),
+                             lte=end_time.isoformat()
+                         )
+                     )
+                 ]
+             )
+
+             # Get embeddings for query
+             if not self.initialize_embeddings():
+                 return "<error>Failed to initialize embedding models</error>"
+
+             # Generate embeddings per collection type
+             query_embeddings = {}
+
+             # Determine collections to search
+             target_project = project
+             if project is None:
+                 cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
+                 path_parts = Path(cwd).parts
+                 if 'projects' in path_parts:
+                     idx = path_parts.index('projects')
+                     if idx + 1 < len(path_parts):
+                         target_project = path_parts[idx + 1]
+                 if target_project is None:
+                     target_project = Path(cwd).name
+
+             # Get collections
+             all_collections = await self.get_all_collections()
+             if target_project != 'all':
+                 from qdrant_client import QdrantClient as SyncQdrantClient
+                 sync_client = SyncQdrantClient(url=self.qdrant_url)
+                 resolver = ProjectResolver(sync_client)
+                 collections_to_search = resolver.find_collections_for_project(target_project)
+             else:
+                 collections_to_search = all_collections
+
+             # Search each collection with time filter
+             all_results = []
+             for collection_name in collections_to_search:
+                 try:
+                     # Determine embedding type for this collection
+                     is_local = collection_name.endswith('_local')
+                     embedding_type = 'local' if is_local else 'voyage'
+
+                     # Generate or retrieve cached embedding for this type
+                     if embedding_type not in query_embeddings:
+                         try:
+                             query_embeddings[embedding_type] = await self.generate_embedding(query, force_type=embedding_type)
+                         except Exception as e:
+                             await ctx.debug(f"Failed to generate {embedding_type} embedding: {e}")
+                             continue
+
+                     collection_embedding = query_embeddings[embedding_type]
+
+                     results = await self.qdrant_client.search(
+                         collection_name=collection_name,
+                         query_vector=collection_embedding,
+                         query_filter=time_filter,
+                         limit=limit,
+                         score_threshold=min_score,
+                         with_payload=True
+                     )
+
+                     for point in results:
+                         result_data = {
+                             'score': point.score,
+                             'timestamp': point.payload.get('timestamp'),
+                             'text': point.payload.get('text', ''),
+                             'project': point.payload.get('project'),
+                             'conversation_id': point.payload.get('conversation_id'),
+                             'files_analyzed': point.payload.get('files_analyzed', []),
+                             'concepts': point.payload.get('concepts', [])
+                         }
+                         all_results.append(result_data)
+
+                 except Exception as e:
+                     await ctx.debug(f"Error searching {collection_name}: {e}")
+                     continue
+
+             # Sort by score
+             all_results.sort(key=lambda x: x['score'], reverse=True)
+             all_results = all_results[:limit]
+
+             if not all_results:
+                 return f"<no_results>No results found for '{query}' in the specified time range.</no_results>"
+
+             # Format results
+             result = f"<search_results query='{query}' time_range='{start_time.date()} to {end_time.date()}' count='{len(all_results)}'>\n"
+
+             for idx, res in enumerate(all_results, 1):
+                 relative_time = parser.format_relative_time(res['timestamp'])
+                 text_preview = res['text'][:200] + '...' if len(res['text']) > 200 else res['text']
+
+                 result += f"  <result rank='{idx}' score='{res['score']:.3f}' time='{relative_time}'>\n"
+                 result += f"    <preview>{text_preview}</preview>\n"
+
+                 if res.get('concepts'):
+                     result += f"    <topics>{', '.join(res['concepts'][:5])}</topics>\n"
+                 if res.get('files_analyzed'):
+                     result += f"    <files>{', '.join(res['files_analyzed'][:3])}</files>\n"
+
+                 result += f"    <conversation_id>{res['conversation_id']}</conversation_id>\n"
+                 result += f"  </result>\n"
+
+             result += "</search_results>"
+             return result
+
+         except Exception as e:
+             logger.error(f"Error in search_by_recency: {e}", exc_info=True)
+             return f"<error>Search failed: {str(e)}</error>"
+
+     async def get_timeline(
+         self,
+         ctx: Context,
+         time_range: str = Field(default="last week", description="Natural language time range"),
+         project: Optional[str] = Field(default=None, description="Specific project or 'all'"),
+         granularity: str = Field(default="day", description="'hour', 'day', 'week', or 'month'"),
+         include_stats: bool = Field(default=True, description="Include activity statistics")
+     ) -> str:
+         """Show activity timeline for a project or across all projects."""
+
+         await ctx.debug(f"Getting timeline: time_range='{time_range}', granularity='{granularity}'")
+
+         try:
+             # Parse time range
+             parser = TemporalParser()
+             start_time, end_time = parser.parse_time_expression(time_range)
+
+             # Determine project scope
+             target_project = project
+             if project is None:
+                 cwd = os.environ.get('MCP_CLIENT_CWD', os.getcwd())
+                 path_parts = Path(cwd).parts
+                 if 'projects' in path_parts:
+                     idx = path_parts.index('projects')
+                     if idx + 1 < len(path_parts):
+                         target_project = path_parts[idx + 1]
+                 if target_project is None:
+                     target_project = Path(cwd).name
+
+             # Get collections
+             all_collections = await self.get_all_collections()
+             if target_project != 'all':
+                 from qdrant_client import QdrantClient as SyncQdrantClient
+                 sync_client = SyncQdrantClient(url=self.qdrant_url)
+                 resolver = ProjectResolver(sync_client)
+                 collections_to_search = resolver.find_collections_for_project(target_project)
+             else:
+                 collections_to_search = all_collections
+
+             # Build time filter using DatetimeRange (v1.8.0+)
+             time_filter = models.Filter(
+                 must=[
+                     models.FieldCondition(
+                         key="timestamp",
+                         range=models.DatetimeRange(
+                             gte=start_time.isoformat(),
+                             lte=end_time.isoformat()
+                         )
+                     )
+                 ]
+             )
+
+             # Collect all chunks in time range
+             all_chunks = []
+             for collection_name in collections_to_search:
+                 try:
+                     # Use scroll with native order_by and time filter for efficient retrieval
+                     results, _ = await self.qdrant_client.scroll(
+                         collection_name=collection_name,
+                         scroll_filter=time_filter,
+                         limit=1000,  # Get many items for timeline
+                         with_payload=True,
+                         order_by=OrderBy(
+                             key="timestamp",
+                             direction="desc"  # Most recent first
+                         )  # Native Qdrant timestamp ordering
+                     )
+
+                     for point in results:
+                         if point.payload:
+                             chunk_data = {
+                                 'timestamp': point.payload.get('timestamp'),
+                                 'project': point.payload.get('project', target_project),
+                                 'conversation_id': point.payload.get('conversation_id'),
+                                 'files_analyzed': point.payload.get('files_analyzed', []),
+                                 'files_edited': point.payload.get('files_edited', []),
+                                 'concepts': point.payload.get('concepts', []),
+                                 'tools_used': point.payload.get('tools_used', []),
+                                 'message_count': point.payload.get('total_messages', 1)
+                             }
+                             all_chunks.append(chunk_data)
+
+                 except Exception as e:
+                     await ctx.debug(f"Error reading {collection_name}: {e}")
+                     continue
+
+             if not all_chunks:
+                 return f"<timeline>No activity found in the specified time range.</timeline>"
+
+             # Group by time period
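+             # group_by_time_period (from temporal_utils) is assumed to return
+             # {period_key: [chunks]} with keys that sort chronologically.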
+             grouped = group_by_time_period(all_chunks, granularity=granularity)
+
+             # Generate timeline
+             result = f"<timeline range='{start_time.date()} to {end_time.date()}' periods='{len(grouped)}'>\n"
+
+             for period_key in sorted(grouped.keys()):
+                 period_chunks = grouped[period_key]
+
+                 result += f"  <period key='{period_key}' conversations='{len(period_chunks)}'>\n"
+
+                 if include_stats:
+                     # Calculate statistics
+                     all_files = []
+                     all_concepts = []
+                     all_tools = []
+                     message_count = 0
+
+                     for chunk in period_chunks:
+                         all_files.extend(chunk.get('files_analyzed', []))
+                         all_files.extend(chunk.get('files_edited', []))
+                         all_concepts.extend(chunk.get('concepts', []))
+                         all_tools.extend(chunk.get('tools_used', []))
+                         message_count += chunk.get('message_count', 1)
+
+                     # Get unique counts and top items
+                     file_counts = Counter(all_files)
+                     concept_counts = Counter(all_concepts)
+                     tool_counts = Counter(all_tools)
+
+                     result += f"    <stats messages='{message_count}' unique_files='{len(file_counts)}' unique_concepts='{len(concept_counts)}'/>\n"
+
+                     if concept_counts:
+                         top_concepts = [c for c, _ in concept_counts.most_common(5)]
+                         result += f"    <top_topics>{', '.join(top_concepts)}</top_topics>\n"
+
+                     if file_counts:
+                         top_files = [f for f, _ in file_counts.most_common(3)]
+                         result += f"    <active_files>{', '.join(top_files)}</active_files>\n"
+
+                 result += f"  </period>\n"
+
+             result += "</timeline>"
+             return result
+
+         except Exception as e:
+             logger.error(f"Error in get_timeline: {e}", exc_info=True)
+             return f"<error>Failed to generate timeline: {str(e)}</error>"
+
+
+ def register_temporal_tools(
+     mcp,
+     qdrant_client,
+     qdrant_url,
+     get_all_collections_func,
+     generate_embedding_func,
+     initialize_embeddings_func,
+     normalize_project_name_func
+ ):
+     """
+     Register temporal tools with the MCP server.
+
+     Args:
+         mcp: FastMCP instance
+         qdrant_client: Async Qdrant client
+         qdrant_url: Qdrant server URL
+         get_all_collections_func: Function to get all collections
+         generate_embedding_func: Function to generate embeddings
+         initialize_embeddings_func: Function to initialize embeddings
+         normalize_project_name_func: Function to normalize project names
+
+     Returns:
+         TemporalTools instance
+     """
+     # Create temporal tools instance with the provided functions
+     tools = TemporalTools(
+         qdrant_client=qdrant_client,
+         qdrant_url=qdrant_url,
+         get_all_collections_func=get_all_collections_func,
+         generate_embedding_func=generate_embedding_func,
+         initialize_embeddings_func=initialize_embeddings_func,
+         normalize_project_name_func=normalize_project_name_func
+     )
+
+     # Register the tools with MCP
+     mcp.tool()(tools.get_recent_work)
+     mcp.tool()(tools.search_by_recency)
+     mcp.tool()(tools.get_timeline)
+
+     return tools
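
For orientation, here is a minimal sketch of how a host server might wire this module up via register_temporal_tools. Apart from that entry point and the fastmcp/qdrant-client APIs, every name below (server label, environment variable, stand-in helpers) is an illustrative assumption, not code from the package:

    import os
    from fastmcp import FastMCP
    from qdrant_client import AsyncQdrantClient

    from src.temporal_tools import register_temporal_tools  # path per item 19 in the file list above

    mcp = FastMCP("claude-self-reflect")  # server name is illustrative
    qdrant_url = os.environ.get("QDRANT_URL", "http://localhost:6333")
    client = AsyncQdrantClient(url=qdrant_url)

    async def get_all_collections() -> list[str]:
        # Stand-in: list every collection name on the Qdrant server.
        return [c.name for c in (await client.get_collections()).collections]

    async def generate_embedding(text: str, force_type: str | None = None) -> list[float]:
        # Stand-in: the package's embedding_manager produces these vectors.
        raise NotImplementedError

    def initialize_embeddings() -> bool:
        # Stand-in: report whether embedding models are ready.
        return True

    def normalize_project_name(name: str) -> str:
        # Stand-in: the real normalizer lives elsewhere in the package.
        return name.lower().replace("-", "_")

    tools = register_temporal_tools(
        mcp,
        qdrant_client=client,
        qdrant_url=qdrant_url,
        get_all_collections_func=get_all_collections,
        generate_embedding_func=generate_embedding,
        initialize_embeddings_func=initialize_embeddings,
        normalize_project_name_func=normalize_project_name,
    )

register_temporal_tools then exposes get_recent_work, search_by_recency, and get_timeline as MCP tools on the server.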