repr-cli 0.2.15__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. repr/__init__.py +1 -1
  2. repr/api.py +363 -62
  3. repr/auth.py +47 -38
  4. repr/change_synthesis.py +478 -0
  5. repr/cli.py +4103 -267
  6. repr/config.py +119 -11
  7. repr/configure.py +889 -0
  8. repr/cron.py +419 -0
  9. repr/dashboard/__init__.py +9 -0
  10. repr/dashboard/build.py +126 -0
  11. repr/dashboard/dist/assets/index-BYFVbEev.css +1 -0
  12. repr/dashboard/dist/assets/index-BrrhyJFO.css +1 -0
  13. repr/dashboard/dist/assets/index-CcEg74ts.js +270 -0
  14. repr/dashboard/dist/assets/index-Cerc-iA_.js +377 -0
  15. repr/dashboard/dist/assets/index-CjVcBW2L.css +1 -0
  16. repr/dashboard/dist/assets/index-Dfl3mR5E.js +377 -0
  17. repr/dashboard/dist/favicon.svg +4 -0
  18. repr/dashboard/dist/index.html +14 -0
  19. repr/dashboard/manager.py +234 -0
  20. repr/dashboard/server.py +1298 -0
  21. repr/db.py +980 -0
  22. repr/hooks.py +3 -2
  23. repr/loaders/__init__.py +22 -0
  24. repr/loaders/base.py +156 -0
  25. repr/loaders/claude_code.py +287 -0
  26. repr/loaders/clawdbot.py +313 -0
  27. repr/loaders/gemini_antigravity.py +381 -0
  28. repr/mcp_server.py +1196 -0
  29. repr/models.py +503 -0
  30. repr/openai_analysis.py +25 -0
  31. repr/session_extractor.py +481 -0
  32. repr/storage.py +360 -0
  33. repr/story_synthesis.py +1296 -0
  34. repr/templates.py +68 -4
  35. repr/timeline.py +710 -0
  36. repr/tools.py +17 -8
  37. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/METADATA +50 -10
  38. repr_cli-0.2.17.dist-info/RECORD +52 -0
  39. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/WHEEL +1 -1
  40. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/entry_points.txt +1 -0
  41. repr_cli-0.2.15.dist-info/RECORD +0 -26
  42. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/licenses/LICENSE +0 -0
  43. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/top_level.txt +0 -0
repr/mcp_server.py ADDED
@@ -0,0 +1,1196 @@
1
+ """
2
+ MCP (Model Context Protocol) server for repr.
3
+
4
+ Exposes repr functionality as tools and resources for AI agents like
5
+ Claude Code, Cursor, Windsurf, and other MCP-compatible clients.
6
+
7
+ Usage:
8
+ repr mcp serve # Start MCP server (stdio mode)
9
+ repr mcp serve --sse # SSE mode for remote clients
10
+ repr mcp serve --port 3001 # Custom port for SSE
11
+ """
12
+
13
+ import asyncio
14
+ import json
15
+ from datetime import datetime, timedelta
16
+ from pathlib import Path
17
+ from typing import Optional
18
+
19
+ from fastmcp import FastMCP
20
+
21
+ from .config import (
22
+ get_tracked_repos,
23
+ get_profile_config,
24
+ load_config,
25
+ )
26
+ from .storage import (
27
+ list_stories,
28
+ load_story,
29
+ STORIES_DIR,
30
+ )
31
+
32
# Create the MCP server. The instructions string is surfaced to MCP clients
# (Claude Code, Cursor, Windsurf, ...) so they know what this server offers.
mcp = FastMCP(
    "repr",
    instructions="Developer identity tool — stories from git history. "
    "Generate compelling narratives from your commits, build your developer profile, "
    "and create content from your work.",
)
39
+
40
+
41
+ # =============================================================================
42
+ # HELPER FUNCTIONS
43
+ # =============================================================================
44
+
45
+ def _parse_date_reference(date_str: str) -> str | None:
46
+ """Parse a date reference string into an ISO date string."""
47
+ import re
48
+
49
+ date_str = date_str.lower().strip()
50
+
51
+ # Try ISO format first
52
+ try:
53
+ parsed = datetime.fromisoformat(date_str)
54
+ return parsed.isoformat()
55
+ except ValueError:
56
+ pass
57
+
58
+ # Day names
59
+ day_names = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
60
+ if date_str in day_names:
61
+ today = datetime.now()
62
+ target_day = day_names.index(date_str)
63
+ current_day = today.weekday()
64
+ days_back = (current_day - target_day) % 7
65
+ if days_back == 0:
66
+ days_back = 7
67
+ target_date = today - timedelta(days=days_back)
68
+ return target_date.replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
69
+
70
+ # Relative time
71
+ match = re.match(r"(\d+)\s+(day|days|week|weeks|month|months)\s+ago", date_str)
72
+ if match:
73
+ amount = int(match.group(1))
74
+ unit = match.group(2).rstrip("s")
75
+ if unit == "day":
76
+ delta = timedelta(days=amount)
77
+ elif unit == "week":
78
+ delta = timedelta(weeks=amount)
79
+ elif unit == "month":
80
+ delta = timedelta(days=amount * 30)
81
+ else:
82
+ return None
83
+ target_date = datetime.now() - delta
84
+ return target_date.replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
85
+
86
+ # Yesterday/today
87
+ if date_str == "yesterday":
88
+ target_date = datetime.now() - timedelta(days=1)
89
+ return target_date.replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
90
+ if date_str == "today":
91
+ return datetime.now().replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
92
+
93
+ # Last week/month
94
+ if date_str == "last week":
95
+ target_date = datetime.now() - timedelta(weeks=1)
96
+ return target_date.replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
97
+ if date_str == "last month":
98
+ target_date = datetime.now() - timedelta(days=30)
99
+ return target_date.replace(hour=0, minute=0, second=0, microsecond=0).isoformat()
100
+
101
+ return None
102
+
103
+
104
def _get_commits_for_period(
    since: str = "7 days ago",
    repo_path: Optional[str] = None,
) -> list[dict]:
    """Get commits for a time period across tracked repos.

    Args:
        since: Date reference — ISO date, weekday name, or relative phrase
            (e.g. "7 days ago"). Unparseable values fall back to "7 days ago".
        repo_path: Restrict the scan to a single repository path; when omitted,
            every tracked repository that still exists on disk is scanned.

    Returns:
        Commit dicts (as produced by get_commits_with_diffs), each annotated
        with "repo_name" and "repo_path", sorted newest-first by date.
    """
    # Fixed: dropped the unused `from .discovery import analyze_repo` import.
    from .tools import get_commits_with_diffs

    since_str = _parse_date_reference(since)
    if not since_str:
        # Unparseable reference — default to the last week rather than failing.
        since_str = _parse_date_reference("7 days ago")

    if repo_path:
        repo_paths = [Path(repo_path)]
    else:
        tracked = get_tracked_repos()
        repo_paths = [Path(r["path"]) for r in tracked if Path(r["path"]).exists()]

    all_commits: list[dict] = []
    for rp in repo_paths:
        try:
            commits = get_commits_with_diffs(rp, count=200, days=90, since=since_str)
            # Tag each commit with its origin so callers can group per repo.
            for c in commits:
                c["repo_name"] = rp.name
                c["repo_path"] = str(rp)
            all_commits.extend(commits)
        except Exception:
            # Best-effort: skip repos that can't be read (moved, corrupt, etc.).
            continue

    # Sort newest-first across all repos.
    all_commits.sort(key=lambda c: c.get("date", ""), reverse=True)
    return all_commits
136
+
137
+
138
+ def _format_commits_summary(commits: list[dict]) -> str:
139
+ """Format commits into a readable summary."""
140
+ if not commits:
141
+ return "No commits found for this period."
142
+
143
+ lines = [f"Found {len(commits)} commits:\n"]
144
+
145
+ # Group by repo
146
+ by_repo: dict[str, list] = {}
147
+ for c in commits:
148
+ repo = c.get("repo_name", "unknown")
149
+ if repo not in by_repo:
150
+ by_repo[repo] = []
151
+ by_repo[repo].append(c)
152
+
153
+ for repo, repo_commits in by_repo.items():
154
+ lines.append(f"\n## {repo} ({len(repo_commits)} commits)\n")
155
+ for c in repo_commits[:10]: # Limit per repo
156
+ sha = c.get("sha", "")[:7]
157
+ msg = c.get("message", "").split("\n")[0][:60]
158
+ lines.append(f"- {sha} {msg}")
159
+ if len(repo_commits) > 10:
160
+ lines.append(f" ... and {len(repo_commits) - 10} more")
161
+
162
+ return "\n".join(lines)
163
+
164
+
165
+ # =============================================================================
166
+ # MCP TOOLS
167
+ # =============================================================================
168
+
169
@mcp.tool()
async def repr_generate(
    repo: Optional[str] = None,
    since: str = "7 days ago",
    template: str = "resume",
    batch_size: int = 5,
    local: bool = True,
) -> str:
    """Generate stories from recent git commits.

    Analyzes your git history and creates narrative stories describing
    what you built. Stories are saved locally and can be pushed to repr.dev.

    Args:
        repo: Path to specific repo (default: all tracked repos)
        since: Date filter — supports ISO dates, day names, or relative
            (e.g., "monday", "7 days ago", "2026-01-01")
        template: Output template — resume, changelog, narrative, interview
        batch_size: How many commits to include per story
        local: Use local LLM (True) or cloud (False)

    Returns:
        Summary of generated stories with their IDs
    """
    # Imports are deferred to call time so the MCP server can start without
    # pulling in the full toolchain.
    from .tools import get_commits_with_diffs
    from .discovery import analyze_repo
    from .storage import get_processed_commit_shas

    since_str = _parse_date_reference(since)
    if not since_str:
        return f"Could not parse date: {since}. Try: '7 days ago', 'monday', '2026-01-01'"

    # Resolve which repositories to process: an explicit path, or every
    # tracked repo that still exists on disk.
    if repo:
        repo_paths = [Path(repo)]
    else:
        tracked = get_tracked_repos()
        if not tracked:
            return "No repositories tracked. Run `repr repos add <path>` first."
        repo_paths = [Path(r["path"]) for r in tracked if Path(r["path"]).exists()]

    if not repo_paths:
        return "No valid repositories found."

    results: list[str] = []  # one or more status lines per repo
    total_stories = 0

    for repo_path in repo_paths:
        try:
            repo_info = analyze_repo(repo_path)
        except Exception as e:
            # Record the failure and keep going — one bad repo should not
            # abort the whole run.
            results.append(f"Error analyzing {repo_path}: {e}")
            continue

        # Get commits since the requested cutoff.
        commits = get_commits_with_diffs(repo_path, count=500, days=90, since=since_str)
        if not commits:
            results.append(f"{repo_info.name}: No commits found since {since}")
            continue

        # Skip commits that already produced a story in an earlier run.
        processed_shas = get_processed_commit_shas(repo_name=repo_info.name)
        commits = [c for c in commits if c["full_sha"] not in processed_shas]

        if not commits:
            results.append(f"{repo_info.name}: All {len(processed_shas)} commits already processed")
            continue

        # Generate stories (using existing async logic).
        from .openai_analysis import get_openai_client, extract_commit_batch
        from .templates import build_generation_prompt
        from .config import get_llm_config

        llm_config = get_llm_config()

        # Local mode targets an OpenAI-compatible endpoint (Ollama defaults);
        # cloud mode lets get_openai_client pick its own credentials/model.
        if local:
            client = get_openai_client(
                api_key=llm_config.get("local_api_key") or "ollama",
                base_url=llm_config.get("local_api_url") or "http://localhost:11434/v1",
            )
            model = llm_config.get("local_model") or "llama3.2"
        else:
            client = get_openai_client()
            model = None

        # Split commits into fixed-size batches; each batch becomes one
        # generation request (and typically one story).
        batches = [commits[i:i + batch_size] for i in range(0, len(commits), batch_size)]
        stories_generated: list[dict] = []

        try:
            for i, batch in enumerate(batches):
                system_prompt, user_prompt = build_generation_prompt(
                    template_name=template,
                    repo_name=repo_info.name,
                    commits=batch,
                )

                result = await extract_commit_batch(
                    client=client,
                    commits=batch,
                    batch_num=i + 1,
                    total_batches=len(batches),
                    model=model,
                    system_prompt=system_prompt,
                    user_prompt=user_prompt,
                    structured=True,
                )

                # Normalize the result: it may be a single StoryOutput or a
                # list of them; anything else is ignored.
                from .templates import StoryOutput
                from .storage import save_story

                story_outputs = []
                if isinstance(result, list):
                    story_outputs = result
                elif isinstance(result, StoryOutput):
                    story_outputs = [result]

                for story_output in story_outputs:
                    if not story_output.content:
                        # Skip empty generations rather than saving blanks.
                        continue

                    metadata = {
                        "summary": story_output.summary,
                        "repo_name": repo_info.name,
                        "repo_path": str(repo_info.path),
                        "commit_shas": [c["full_sha"] for c in batch],
                        "generated_locally": local,
                        "template": template,
                        "category": story_output.category,
                        "scope": story_output.scope,
                        "stack": story_output.stack,
                    }

                    story_id = save_story(story_output.content, metadata)
                    stories_generated.append({
                        "id": story_id,
                        "summary": story_output.summary,
                    })
                    total_stories += 1
        finally:
            # Always release the HTTP client, even if a batch raised.
            await client.close()

        if stories_generated:
            results.append(f"{repo_info.name}: Generated {len(stories_generated)} stories")
            for s in stories_generated:
                results.append(f" - {s['summary']} (ID: {s['id']})")
        else:
            results.append(f"{repo_info.name}: No stories generated")

    summary = f"Generated {total_stories} total stories.\n\n" + "\n".join(results)
    return summary
321
+
322
+
323
@mcp.tool()
async def repr_stories_list(
    repo: Optional[str] = None,
    category: Optional[str] = None,
    limit: int = 10,
) -> str:
    """List existing stories with metadata.

    Args:
        repo: Filter by repository name
        category: Filter by category (feature, bugfix, refactor, perf, infra, docs, test, chore)
        limit: Maximum stories to return

    Returns:
        List of stories with ID, summary, repo, and creation date
    """
    # Fix: the category filter is applied client-side, but the original code
    # truncated to `limit` inside list_stories() BEFORE filtering, silently
    # missing matching stories beyond the first `limit`. Fetch a wider window
    # when a category filter is requested, then truncate after filtering.
    fetch_limit = max(limit * 10, 100) if category else limit
    stories = list_stories(repo_name=repo, limit=fetch_limit)

    if category:
        stories = [s for s in stories if s.get("category") == category]

    # Truncate to the requested page size after filtering, so the reported
    # count matches the entries actually shown.
    stories = stories[:limit]

    if not stories:
        return "No stories found. Run `repr generate` to create stories from commits."

    lines = [f"Found {len(stories)} stories:\n"]

    for s in stories:
        story_id = s.get("id", "unknown")
        summary = s.get("summary", "Untitled")
        repo_name = s.get("repo_name", "unknown")
        created = s.get("created_at", "")[:10]  # keep only the ISO date part
        cat = s.get("category", "")
        cat_str = f"[{cat}] " if cat else ""

        lines.append(f"- {cat_str}{summary}")
        lines.append(f" ID: {story_id} | Repo: {repo_name} | Created: {created}")

    return "\n".join(lines)
361
+
362
+
363
@mcp.tool()
async def repr_week() -> str:
    """Weekly summary — what you built in the last 7 days.

    Provides a quick overview of your work from the past week,
    including commits across all tracked repositories.

    Returns:
        Summary of the week's work
    """
    commits = _get_commits_for_period(since="7 days ago")
    if not commits:
        return "No commits found in the last 7 days across tracked repositories."

    # Pull stories generated during the same window.
    week_ago = datetime.now() - timedelta(days=7)
    recent_stories = list_stories(since=week_ago, limit=20)

    report = [
        "# Weekly Summary\n",
        f"**Period:** Last 7 days (since {week_ago.strftime('%Y-%m-%d')})\n",
    ]

    # Aggregate stats across every repo.
    repo_names = set(c.get("repo_name") for c in commits)
    added = sum(c.get("insertions", 0) for c in commits)
    removed = sum(c.get("deletions", 0) for c in commits)

    report.append(f"**Stats:** {len(commits)} commits across {len(repo_names)} repos")
    report.append(f"**Lines:** +{added} / -{removed}\n")

    if recent_stories:
        report.append(f"## Stories Generated ({len(recent_stories)})\n")
        report.extend(
            f"- {s.get('summary', 'Untitled')} ({s.get('repo_name', 'unknown')})"
            for s in recent_stories[:5]
        )

    # Per-repo breakdown, busiest repos first.
    report.append("\n## Commits by Repository\n")
    grouped: dict[str, list] = {}
    for c in commits:
        grouped.setdefault(c.get("repo_name", "unknown"), []).append(c)

    for repo, repo_commits in sorted(grouped.items(), key=lambda kv: -len(kv[1])):
        report.append(f"\n### {repo} ({len(repo_commits)} commits)")
        for c in repo_commits[:5]:
            subject = c.get("message", "").split("\n")[0][:60]
            report.append(f"- {subject}")
        if len(repo_commits) > 5:
            report.append(f" ... and {len(repo_commits) - 5} more")

    report.append("\n---")
    report.append("Run `repr generate --since '7 days ago'` to turn these into stories.")

    return "\n".join(report)
423
+
424
+
425
@mcp.tool()
async def repr_standup() -> str:
    """Quick standup — what you did yesterday and today.

    Perfect for daily standups or quick status updates.
    Shows commits from the last 2 days.

    Returns:
        Summary suitable for a standup meeting
    """
    midnight_today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    midnight_yesterday = midnight_today - timedelta(days=1)

    commits = _get_commits_for_period(since="2 days ago")
    if not commits:
        return "No commits found in the last 2 days."

    # Bucket commits by day; anything older than yesterday is dropped, and
    # commits with unparseable dates are counted as yesterday's work.
    buckets: dict[str, list] = {"today": [], "yesterday": []}
    for commit in commits:
        try:
            when = datetime.fromisoformat(commit.get("date", "").replace("Z", "+00:00"))
        except (ValueError, TypeError):
            buckets["yesterday"].append(commit)
            continue
        if when.date() >= midnight_today.date():
            buckets["today"].append(commit)
        elif when.date() >= midnight_yesterday.date():
            buckets["yesterday"].append(commit)

    def _bullet(commit: dict) -> str:
        # One-line entry: repo tag plus truncated commit subject.
        subject = commit.get("message", "").split("\n")[0][:50]
        return f"- [{commit.get('repo_name', '')}] {subject}"

    output = ["# Standup Summary\n"]

    yesterday_bucket = buckets["yesterday"]
    if yesterday_bucket:
        output.append("## Yesterday")
        output.extend(_bullet(c) for c in yesterday_bucket[:10])
        if len(yesterday_bucket) > 10:
            output.append(f" ... and {len(yesterday_bucket) - 10} more")
        output.append("")

    today_bucket = buckets["today"]
    if today_bucket:
        output.append("## Today")
        output.extend(_bullet(c) for c in today_bucket[:10])
        if len(today_bucket) > 10:
            output.append(f" ... and {len(today_bucket) - 10} more")
    else:
        output.append("## Today\nNo commits yet today.")

    return "\n".join(output)
482
+
483
+
484
@mcp.tool()
async def repr_profile() -> str:
    """Get your developer profile.

    Returns your repr profile information including bio,
    skills, and profile settings.

    Returns:
        Your developer profile as markdown
    """
    profile = get_profile_config()
    md = ["# Developer Profile\n"]

    handle = profile.get("username")
    if handle:
        md.append(f"**Username:** @{handle}")
        if profile.get("claimed"):
            md.append(f"**Profile URL:** https://repr.dev/@{handle}")

    about = profile.get("bio")
    if about:
        md.append(f"\n## Bio\n{about}")

    place = profile.get("location")
    if place:
        md.append(f"\n**Location:** {place}")

    site = profile.get("website")
    if site:
        md.append(f"**Website:** {site}")

    # Story statistics from local storage.
    stories = list_stories(limit=1000)
    if stories:
        md.append(f"\n## Stats")
        md.append(f"- Total stories: {len(stories)}")

        # Tally stories per category.
        tally: dict[str, int] = {}
        for story in stories:
            key = story.get("category", "uncategorized")
            tally[key] = tally.get(key, 0) + 1

        if tally:
            md.append("- By category:")
            for name, count in sorted(tally.items(), key=lambda kv: -kv[1])[:5]:
                md.append(f" - {name}: {count}")

    repos = get_tracked_repos()
    if repos:
        md.append(f"\n## Tracked Repositories ({len(repos)})")
        md.extend(f"- {Path(entry['path']).name}" for entry in repos[:10])

    return "\n".join(md)
541
+
542
+
543
+ # =============================================================================
544
+ # TIMELINE & SESSION TOOLS (Phase 5)
545
+ # =============================================================================
546
+
547
@mcp.tool()
async def init_project(
    repo_path: str,
    include_sessions: bool = True,
    session_source: str = "auto",
    days_back: int = 90,
    max_commits: int = 500,
) -> dict:
    """
    Initialize repr for a project with unified timeline.

    Creates .repr/timeline.json with commits and optionally AI session context.
    Use this before querying context from a project.

    Args:
        repo_path: Path to the git repository
        include_sessions: Include AI session context (Claude Code, Clawdbot)
        session_source: "auto" (detect), "claude_code", "clawdbot", or "none"
        days_back: Number of days of history to include
        max_commits: Maximum commits to include

    Returns:
        Initialization status with stats
    """
    from pathlib import Path
    from .timeline import (
        detect_project_root,
        is_initialized,
        init_timeline_commits_only,
        init_timeline_with_sessions,
        get_timeline_stats,
    )
    from .loaders import detect_session_source

    project_path = Path(repo_path).resolve()

    # Verify it's a git repo; detect_project_root also normalizes to the
    # repository root even if a subdirectory was passed.
    repo_root = detect_project_root(project_path)
    if not repo_root:
        return {"success": False, "error": f"Not a git repository: {repo_path}"}

    project_path = repo_root

    # Idempotence: if a timeline already exists, don't rebuild it.
    if is_initialized(project_path):
        return {
            "success": True,
            "already_initialized": True,
            "project": str(project_path),
            "message": "Timeline already exists. Use get_context to query it.",
        }

    # Determine which session sources to ingest (if any).
    session_sources = []
    if include_sessions and session_source != "none":
        if session_source == "auto":
            session_sources = detect_session_source(project_path)
        else:
            session_sources = [session_source]

    try:
        if session_sources:
            # Session extraction needs an LLM. Resolve credentials in priority
            # order: BYOK, then local LLM config, then LiteLLM cloud, then the
            # OPENAI_API_KEY environment variable.
            from .config import get_byok_config, get_litellm_config, get_llm_config
            import os

            api_key: str | None = None
            base_url: str | None = None
            model: str | None = None

            # 1) BYOK (bring-your-own-key) config.
            byok_config = get_byok_config("openai")
            if byok_config:
                api_key = byok_config.get("api_key")
                base_url = byok_config.get("base_url")

            # 2) Local LLM config — only when "local" is the configured default.
            if not api_key:
                llm_config = get_llm_config()
                if llm_config.get("default") == "local" and llm_config.get("local_api_key"):
                    api_key = llm_config["local_api_key"]
                    base_url = llm_config.get("local_api_url")
                    model = llm_config.get("local_model")

            # 3) LiteLLM (cloud).
            if not api_key:
                _, api_key = get_litellm_config()

            # 4) Environment variable.
            if not api_key:
                api_key = os.environ.get("OPENAI_API_KEY")

            if not api_key:
                # No credentials anywhere — degrade gracefully to a
                # commits-only timeline rather than failing.
                session_sources = []

        if session_sources:
            kwargs = {
                "days": days_back,
                "max_commits": max_commits,
                "session_sources": session_sources,
                "api_key": api_key,
                "base_url": base_url,
            }
            # Only pass a model when one was explicitly configured.
            if model:
                kwargs["model"] = model
            timeline = await init_timeline_with_sessions(project_path, **kwargs)
        else:
            timeline = init_timeline_commits_only(
                project_path,
                days=days_back,
                max_commits=max_commits,
            )

        stats = get_timeline_stats(timeline)

        return {
            "success": True,
            "project": str(project_path),
            "timeline_path": str(project_path / ".repr" / "timeline.json"),
            "session_sources": session_sources,
            "stats": {
                "total_entries": stats["total_entries"],
                "commits": stats["commit_count"],
                "sessions": stats["session_count"],
                "merged": stats["merged_count"],
            },
        }

    except Exception as e:
        # Surface any failure as a structured error instead of raising
        # through the MCP boundary.
        return {"success": False, "error": str(e)}
678
+
679
+
680
@mcp.tool()
async def ingest_session(
    session_file: Optional[str] = None,
    project_path: Optional[str] = None,
    session_source: str = "auto",
) -> dict:
    """
    Ingest a completed AI session into the timeline.

    Called by SessionEnd hooks to capture context from AI coding sessions.
    Extracts structured context (problem, approach, decisions, outcome)
    and links to related commits.

    Args:
        session_file: Path to session JSONL file
        project_path: Project path (auto-detected from session cwd if not provided)
        session_source: "auto", "claude_code", or "clawdbot"

    Returns:
        Ingestion result with extracted context summary
    """
    from pathlib import Path
    from .timeline import (
        detect_project_root,
        is_initialized,
        load_timeline,
        save_timeline,
        extract_commits_from_git,
    )
    from .models import (
        TimelineEntry,
        TimelineEntryType,
        match_commits_to_sessions,
    )
    from .loaders import ClaudeCodeLoader, ClawdbotLoader
    from .session_extractor import SessionExtractor
    from .config import get_byok_config, get_litellm_config
    import os

    if not session_file:
        return {"success": False, "error": "session_file is required"}

    file_path = Path(session_file).resolve()
    if not file_path.exists():
        return {"success": False, "error": f"Session file not found: {session_file}"}

    # Determine source from the file path when set to "auto": session logs
    # live under a tool-specific dotted directory (.claude / .clawdbot).
    if session_source == "auto":
        if ".claude" in str(file_path):
            session_source = "claude_code"
        elif ".clawdbot" in str(file_path):
            session_source = "clawdbot"
        else:
            session_source = "claude_code"  # Default

    # Load session with the matching loader.
    if session_source == "claude_code":
        loader = ClaudeCodeLoader()
    elif session_source == "clawdbot":
        loader = ClawdbotLoader()
    else:
        return {"success": False, "error": f"Unknown source: {session_source}"}

    session = loader.load_session(file_path)
    if not session:
        return {"success": False, "error": "Failed to load session"}

    # Determine the project: an explicit argument wins, otherwise fall back
    # to the working directory recorded in the session.
    if project_path:
        proj_path = Path(project_path).resolve()
    elif session.cwd:
        proj_path = detect_project_root(Path(session.cwd))
    else:
        proj_path = None

    if not proj_path:
        return {"success": False, "error": "Could not detect project path. Provide project_path."}

    # The timeline must already exist — ingestion only appends to it.
    if not is_initialized(proj_path):
        return {
            "success": False,
            "error": f"Timeline not initialized for {proj_path}",
            "hint": "Run init_project first",
        }

    # Load timeline
    timeline = load_timeline(proj_path)
    if not timeline:
        return {"success": False, "error": "Failed to load timeline"}

    # Idempotence: skip sessions that were already ingested.
    for entry in timeline.entries:
        if entry.session_context and entry.session_context.session_id == session.id:
            return {
                "success": True,
                "skipped": True,
                "reason": "Session already ingested",
                "session_id": session.id,
            }

    # Resolve API credentials: BYOK, then LiteLLM, then environment.
    api_key: str | None = None
    byok_config = get_byok_config("openai")
    if byok_config:
        api_key = byok_config.get("api_key")
    if not api_key:
        _, api_key = get_litellm_config()
    if not api_key:
        api_key = os.environ.get("OPENAI_API_KEY")

    if not api_key:
        return {"success": False, "error": "No API key for extraction"}

    # Extract structured context (problem/approach/decisions/outcome)
    # from the raw session via the LLM.
    try:
        extractor = SessionExtractor(api_key=api_key)
        context = await extractor.extract_context(session)
    except Exception as e:
        return {"success": False, "error": f"Extraction failed: {e}"}

    # Link the session to commits made around the same time (last day).
    recent_commits = extract_commits_from_git(proj_path, days=1, max_commits=50)

    if recent_commits:
        matches = match_commits_to_sessions(recent_commits, [session])
        context.linked_commits = [m.commit_sha for m in matches if m.session_id == session.id]

    # Default: record the session as a standalone entry.
    entry_type = TimelineEntryType.SESSION

    # Prefer merging into an existing commit entry when one of the linked
    # commits is already on the timeline; entry_type=None marks "merged".
    if context.linked_commits:
        for commit_sha in context.linked_commits:
            for entry in timeline.entries:
                if entry.commit and entry.commit.sha == commit_sha:
                    entry.session_context = context
                    entry.type = TimelineEntryType.MERGED
                    entry_type = None
                    break
            if entry_type is None:
                break

    # Add a standalone entry only if no merge happened.
    if entry_type is not None:
        entry = TimelineEntry(
            timestamp=context.timestamp,
            type=entry_type,
            commit=None,
            session_context=context,
            story=None,
        )
        timeline.add_entry(entry)

    # Persist the updated timeline.
    save_timeline(timeline, proj_path)

    # Return a truncated view of the extracted context for the caller.
    return {
        "success": True,
        "session_id": session.id,
        "project": str(proj_path),
        "context": {
            "problem": context.problem[:200],
            "approach": context.approach[:200],
            "decisions": context.decisions[:3],
            "outcome": context.outcome,
            "files_modified": context.files_modified[:10],
        },
        "linked_commits": context.linked_commits,
        "entry_type": entry_type.value if entry_type else "merged",
    }
851
+
852
+
853
+ @mcp.tool()
854
+ async def get_context(
855
+ query: str,
856
+ project: Optional[str] = None,
857
+ days_back: int = 30,
858
+ include_sessions: bool = True,
859
+ limit: int = 10,
860
+ ) -> list[dict]:
861
+ """
862
+ Query developer context from the timeline.
863
+
864
+ Searches through commits and session context to find relevant
865
+ information about how this developer approaches problems.
866
+
867
+ Args:
868
+ query: Natural language query (e.g., "how do I handle auth", "what patterns for caching")
869
+ project: Project path (default: current directory or all tracked repos)
870
+ days_back: How many days of history to search
871
+ include_sessions: Include session context in results
872
+ limit: Maximum results to return
873
+
874
+ Returns:
875
+ List of relevant context entries with problem, approach, decisions, etc.
876
+ """
877
+ from pathlib import Path
878
+ from datetime import datetime, timedelta, timezone
879
+ from .timeline import (
880
+ detect_project_root,
881
+ is_initialized,
882
+ load_timeline,
883
+ query_timeline,
884
+ )
885
+ from .models import TimelineEntryType
886
+ from .config import get_tracked_repos
887
+
888
+ results = []
889
+
890
+ # Determine projects to search
891
+ projects = []
892
+ if project:
893
+ proj_path = Path(project).resolve()
894
+ repo_root = detect_project_root(proj_path)
895
+ if repo_root and is_initialized(repo_root):
896
+ projects.append(repo_root)
897
+ else:
898
+ # Try current directory
899
+ cwd_root = detect_project_root(Path.cwd())
900
+ if cwd_root and is_initialized(cwd_root):
901
+ projects.append(cwd_root)
902
+
903
+ # Also check tracked repos
904
+ for repo in get_tracked_repos():
905
+ repo_path = Path(repo["path"])
906
+ if repo_path.exists() and is_initialized(repo_path):
907
+ if repo_path not in projects:
908
+ projects.append(repo_path)
909
+
910
+ if not projects:
911
+ return [{
912
+ "error": "No initialized projects found",
913
+ "hint": "Run init_project first or specify a project path",
914
+ }]
915
+
916
+ # Query keywords from natural language
917
+ query_lower = query.lower()
918
+ query_words = set(query_lower.split())
919
+
920
+ since = datetime.now(timezone.utc) - timedelta(days=days_back)
921
+
922
+ for proj_path in projects:
923
+ timeline = load_timeline(proj_path)
924
+ if not timeline:
925
+ continue
926
+
927
+ # Get entries in time range
928
+ entries = query_timeline(timeline, since=since)
929
+
930
+ for entry in entries:
931
+ score = 0.0
932
+ matched_fields = []
933
+
934
+ # Score based on commit message
935
+ if entry.commit:
936
+ msg_lower = entry.commit.message.lower()
937
+ for word in query_words:
938
+ if word in msg_lower:
939
+ score += 0.3
940
+ matched_fields.append("commit_message")
941
+ break
942
+
943
+ # Check files
944
+ for f in entry.commit.files:
945
+ f_lower = f.lower()
946
+ for word in query_words:
947
+ if word in f_lower:
948
+ score += 0.2
949
+ matched_fields.append("files")
950
+ break
951
+
952
+ # Score based on session context
953
+ if entry.session_context and include_sessions:
954
+ ctx = entry.session_context
955
+
956
+ # Check problem
957
+ if any(word in ctx.problem.lower() for word in query_words):
958
+ score += 0.5
959
+ matched_fields.append("problem")
960
+
961
+ # Check approach
962
+ if any(word in ctx.approach.lower() for word in query_words):
963
+ score += 0.4
964
+ matched_fields.append("approach")
965
+
966
+ # Check decisions
967
+ for decision in ctx.decisions:
968
+ if any(word in decision.lower() for word in query_words):
969
+ score += 0.3
970
+ matched_fields.append("decisions")
971
+ break
972
+
973
+ # Check lessons
974
+ for lesson in ctx.lessons:
975
+ if any(word in lesson.lower() for word in query_words):
976
+ score += 0.3
977
+ matched_fields.append("lessons")
978
+ break
979
+
980
+ if score > 0:
981
+ result = {
982
+ "score": round(score, 2),
983
+ "matched_fields": list(set(matched_fields)),
984
+ "project": proj_path.name,
985
+ "timestamp": entry.timestamp.isoformat(),
986
+ "type": entry.type.value,
987
+ }
988
+
989
+ if entry.commit:
990
+ result["commit"] = {
991
+ "sha": entry.commit.sha[:8],
992
+ "message": entry.commit.message.split("\n")[0][:100],
993
+ "files": entry.commit.files[:5],
994
+ }
995
+
996
+ if entry.session_context:
997
+ ctx = entry.session_context
998
+ result["context"] = {
999
+ "problem": ctx.problem,
1000
+ "approach": ctx.approach,
1001
+ "decisions": ctx.decisions,
1002
+ "outcome": ctx.outcome,
1003
+ "lessons": ctx.lessons,
1004
+ }
1005
+
1006
+ results.append(result)
1007
+
1008
+ # Sort by score and limit
1009
+ results.sort(key=lambda r: r["score"], reverse=True)
1010
+ return results[:limit]
1011
+
1012
+
1013
+ # =============================================================================
1014
+ # STORY-CENTRIC TOOLS (Phase 7)
1015
+ # =============================================================================
1016
+
1017
+ def _story_to_dict(story, project_name: str = "") -> dict:
1018
+ """Convert a Story object to a dictionary for MCP response."""
1019
+ return {
1020
+ "project": project_name,
1021
+ "story": {
1022
+ "id": story.id,
1023
+ "title": story.title,
1024
+ "problem": story.problem,
1025
+ "approach": story.approach,
1026
+ "implementation_details": story.implementation_details,
1027
+ "decisions": story.decisions,
1028
+ "outcome": story.outcome,
1029
+ "lessons": story.lessons,
1030
+ "category": story.category,
1031
+ "files": story.files[:10] if story.files else [],
1032
+ "commit_count": len(story.commit_shas) if story.commit_shas else 0,
1033
+ "session_count": len(story.session_ids) if story.session_ids else 0,
1034
+ "started_at": story.started_at.isoformat() if story.started_at else None,
1035
+ }
1036
+ }
1037
+
1038
+
1039
@mcp.tool()
async def search_stories(
    query: str,
    files: Optional[list[str]] = None,
    since: Optional[str] = None,
    category: Optional[str] = None,
    limit: int = 10,
) -> list[dict]:
    """
    Search developer stories using the content index.

    Efficiently finds relevant stories by keyword, file, or time range.
    Returns Story objects with full WHY/WHAT context.

    Args:
        query: Keywords to search (matches title, problem, keywords)
        files: Optional file paths to filter by
        since: Optional time filter (e.g., "7 days ago", "2026-01-01");
            unparseable values are ignored rather than raising
        category: Optional category filter (feature, bugfix, refactor, etc.)
        limit: Maximum stories to return

    Returns:
        List of matching stories with context
    """
    import re
    from datetime import datetime, timedelta, timezone

    from .db import get_db

    # BUG FIX: `since` was previously accepted and documented but never
    # applied. Parse it here, supporting both relative ("7 days ago") and
    # ISO-8601 ("2026-01-01") forms.
    since_dt = None
    if since:
        relative = re.match(r"(\d+)\s*(day|week|month)s?\s*ago", since.strip().lower())
        if relative:
            count = int(relative.group(1))
            unit_days = {"day": 1, "week": 7, "month": 30}[relative.group(2)]
            since_dt = datetime.now(timezone.utc) - timedelta(days=count * unit_days)
        else:
            try:
                since_dt = datetime.fromisoformat(since)
                if since_dt.tzinfo is None:
                    # Assume UTC for naive inputs so comparisons below work.
                    since_dt = since_dt.replace(tzinfo=timezone.utc)
            except ValueError:
                since_dt = None  # Best-effort: ignore unparseable filters.

    db = get_db()
    stories = db.search_stories(query, files=files, limit=limit)

    # Apply category filter (post-query; the index search does not take it).
    if category:
        stories = [s for s in stories if s.category == category]

    # Apply the time filter: keep stories whose start (or creation) time is
    # at or after the cutoff. Stories with no timestamp at all are kept so
    # the filter never hides otherwise-matching results spuriously.
    if since_dt is not None:
        kept = []
        for s in stories:
            ts = s.started_at or getattr(s, "created_at", None)
            if ts is None:
                kept.append(s)
                continue
            if ts.tzinfo is None:
                ts = ts.replace(tzinfo=timezone.utc)
            if ts >= since_dt:
                kept.append(s)
        stories = kept

    results = []
    for story in stories:
        result = _story_to_dict(story)
        result["score"] = 1.0  # FTS already ranks by relevance
        results.append(result)

    return results[:limit]
1079
+
1080
+
1081
@mcp.tool()
async def get_story(story_id: str) -> dict:
    """
    Get full details of a specific story by ID.

    Args:
        story_id: Story UUID

    Returns:
        Complete story with all context and linked commits/sessions
    """
    from .db import get_db

    story = get_db().get_story(story_id)

    # Guard clause: unknown ID -> a compact not-found payload.
    if not story:
        return {"found": False, "story_id": story_id}

    # Short SHA references (first 8 chars) and raw session IDs, tolerating
    # None collections on the story object.
    commit_refs = [{"sha": sha[:8]} for sha in (story.commit_shas or [])]
    session_refs = [{"id": sid} for sid in (story.session_ids or [])]

    return {
        "found": True,
        "story": story.model_dump(),
        "commits": commit_refs,
        "sessions": session_refs,
    }
1110
+
1111
+
1112
@mcp.tool()
async def list_recent_stories(
    days: int = 7,
    category: Optional[str] = None,
    limit: int = 20,
) -> list[dict]:
    """
    List recent stories from the timeline.

    Args:
        days: Number of days to look back
        category: Optional category filter
        limit: Maximum stories to return

    Returns:
        List of recent stories with summaries
    """
    from datetime import datetime, timedelta, timezone
    from .db import get_db

    cutoff = datetime.now(timezone.utc) - timedelta(days=days)
    recent = get_db().list_stories(category=category, since=cutoff, limit=limit)

    def _summarize(story) -> dict:
        # One summary row per story; problem text is truncated to 200 chars
        # and counts default to 0 when the underlying lists are missing.
        when = story.started_at or story.created_at
        return {
            "id": story.id,
            "title": story.title,
            "problem": story.problem[:200] if story.problem else "",
            "category": story.category,
            "commit_count": len(story.commit_shas) if story.commit_shas else 0,
            "session_count": len(story.session_ids) if story.session_ids else 0,
            "implementation_detail_count": len(story.implementation_details) if story.implementation_details else 0,
            "timestamp": when.isoformat() if when else None,
        }

    return [_summarize(story) for story in recent]
1152
+
1153
+
1154
+ # =============================================================================
1155
+ # MCP RESOURCES
1156
+ # =============================================================================
1157
+
1158
+
1159
@mcp.resource("repr://profile")
async def get_profile_resource() -> str:
    """Current developer profile.

    MCP resource wrapper: delegates to the ``repr_profile`` tool defined
    elsewhere in this module and returns its string payload unchanged.
    """
    return await repr_profile()
1163
+
1164
+
1165
@mcp.resource("repr://stories/recent")
async def get_recent_stories_resource() -> str:
    """Last 10 stories.

    MCP resource wrapper: delegates to the ``repr_stories_list`` tool
    defined elsewhere in this module with a fixed limit of 10.
    """
    return await repr_stories_list(limit=10)
1169
+
1170
+
1171
+ # =============================================================================
1172
+ # SERVER RUNNER
1173
+ # =============================================================================
1174
+
1175
def run_server(
    sse: bool = False,
    port: int = 3001,
    host: str = "127.0.0.1",
) -> None:
    """Run the MCP server.

    Args:
        sse: Use SSE transport instead of stdio
        port: Port for SSE mode
        host: Host for SSE mode
    """
    if not sse:
        # Default: stdio transport for local MCP clients.
        mcp.run()
        return
    # SSE transport for remote/web clients.
    mcp.run(transport="sse", host=host, port=port)
1193
+
1194
+
1195
if __name__ == "__main__":
    # Allow running this module directly as a stdio MCP server.
    run_server()