okb-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
okb/mcp_server.py ADDED
@@ -0,0 +1,1393 @@
+ """
+ MCP Server for Knowledge Base.
+
+ Exposes semantic search to Claude Code via the Model Context Protocol.
+
+ Usage:
+     python mcp_server.py
+
+ Configure in Claude Code (~/.claude.json or similar):
+     {
+       "mcpServers": {
+         "knowledge-base": {
+           "command": "python",
+           "args": ["/path/to/mcp_server.py"]
+         }
+       }
+     }
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import hashlib
+ import re
+ import sys
+ import uuid
+ from datetime import UTC, datetime
+ from typing import Any
+
+ import psycopg
+ from mcp.server import Server
+ from mcp.server.stdio import stdio_server
+ from mcp.types import (
+     CallToolResult,
+     TextContent,
+     Tool,
+ )
+ from pgvector.psycopg import register_vector
+ from psycopg.rows import dict_row
+
+ from .config import config
+ from .local_embedder import embed_document, embed_query, warmup
+
+
+ def get_document_date(metadata: dict) -> str | None:
+     """Get best available date: document_date > file_modified_at."""
+     return metadata.get("document_date") or metadata.get("file_modified_at")
+
+
+ def format_relative_time(iso_timestamp: str) -> str:
+     """Format ISO timestamp as relative time (e.g., '3d ago')."""
+     try:
+         dt = datetime.fromisoformat(iso_timestamp.replace("Z", "+00:00"))
+         # Handle naive datetimes (date-only strings like '2020-11-10')
+         if dt.tzinfo is None:
+             dt = dt.replace(tzinfo=UTC)
+         delta = datetime.now(UTC) - dt
+         if delta.days < 0:
+             return "future"
+         if delta.days > 365:
+             return f"{delta.days // 365}y ago"
+         if delta.days > 30:
+             return f"{delta.days // 30}mo ago"
+         if delta.days > 0:
+             return f"{delta.days}d ago"
+         if delta.seconds > 3600:
+             return f"{delta.seconds // 3600}h ago"
+         if delta.seconds > 60:
+             return f"{delta.seconds // 60}m ago"
+         return "just now"
+     except (ValueError, TypeError):
+         return ""
+
+
+ def parse_since_filter(since: str) -> datetime | None:
+     """Parse since filter like '7d', '30d', '6mo' or ISO date."""
+     from datetime import timedelta
+
+     now = datetime.now(UTC)
+     match = re.match(r"^(\d+)(d|mo|y)$", since.lower())
+     if match:
+         value, unit = int(match.group(1)), match.group(2)
+         days = value * {"d": 1, "mo": 30, "y": 365}[unit]
+         return now - timedelta(days=days)
+     try:
+         return datetime.fromisoformat(since.replace("Z", "+00:00"))
+     except ValueError:
+         return None
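+ # Per the branches above: "7d" -> now - 7 days, "6mo" -> now - 180 days,
+ # a bare ISO date parses as fromisoformat returns it, and anything
+ # unrecognized -> None.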
+
+
+ def parse_date_range(date_str: str) -> tuple[datetime, datetime] | None:
+     """Parse date range like 'today', 'tomorrow', 'this_week', '2024-01-15', or ISO date."""
+     from datetime import timedelta
+
+     now = datetime.now(UTC)
+     today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
+     today_end = today_start + timedelta(days=1)
+
+     if date_str.lower() == "today":
+         return (today_start, today_end)
+     elif date_str.lower() == "tomorrow":
+         return (today_end, today_end + timedelta(days=1))
+     elif date_str.lower() == "this_week":
+         # Monday to Sunday
+         days_since_monday = now.weekday()
+         week_start = today_start - timedelta(days=days_since_monday)
+         return (week_start, week_start + timedelta(days=7))
+     elif date_str.lower() == "next_week":
+         days_since_monday = now.weekday()
+         next_week_start = today_start + timedelta(days=7 - days_since_monday)
+         return (next_week_start, next_week_start + timedelta(days=7))
+     elif re.match(r"^\d{4}-\d{2}-\d{2}$", date_str):
+         # Single date: return that day
+         try:
+             dt = datetime.fromisoformat(date_str).replace(tzinfo=UTC)
+             return (dt, dt + timedelta(days=1))
+         except ValueError:
+             return None
+     return None
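+ # All ranges are half-open [start, end) pairs in UTC; e.g. "today" spans
+ # midnight to midnight, and "2024-01-15" spans that single day.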
+
+
+ class KnowledgeBase:
+     """Knowledge base with semantic and keyword search."""
+
+     def __init__(self, db_url: str):
+         self.db_url = db_url
+         self._conn = None
+
+     def get_connection(self):
+         """Get or create database connection."""
+         if self._conn is None or self._conn.closed:
+             self._conn = psycopg.connect(self.db_url, row_factory=dict_row)
+             register_vector(self._conn)
+         return self._conn
+
+     def close(self):
+         """Close the database connection if open."""
+         if self._conn is not None and not self._conn.closed:
+             self._conn.close()
+         self._conn = None
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.close()
+         return False
+
+     def __del__(self):
+         self.close()
+
+     def semantic_search(
+         self,
+         query: str,
+         limit: int = 5,
+         source_type: str | None = None,
+         project: str | None = None,
+         min_score: float = 0.25,
+         since: str | None = None,
+     ) -> list[dict]:
+         """
+         Search for semantically similar chunks.
+
+         Returns chunks with their parent document context.
+         """
+         embedding = embed_query(query)
+         conn = self.get_connection()
+
+         # Build query with optional filters
+         sql = """
+             SELECT
+                 c.content,
+                 c.chunk_index,
+                 c.metadata as chunk_metadata,
+                 d.source_path,
+                 d.source_type,
+                 d.title,
+                 d.metadata as doc_metadata,
+                 1 - (c.embedding <=> %s::vector) as similarity
+             FROM chunks c
+             JOIN documents d ON c.document_id = d.id
+             WHERE 1 - (c.embedding <=> %s::vector) > %s
+         """
+         params: list[Any] = [embedding, embedding, min_score]
+
+         if source_type:
+             sql += " AND d.source_type = %s"
+             params.append(source_type)
+
+         if project:
+             sql += " AND d.metadata->>'project' = %s"
+             params.append(project)
+
+         if since:
+             since_dt = parse_since_filter(since)
+             if since_dt:
+                 sql += """ AND COALESCE(
+                     (d.metadata->>'document_date')::timestamptz,
+                     (d.metadata->>'file_modified_at')::timestamptz
+                 ) >= %s"""
+                 params.append(since_dt)
+
+         sql += " ORDER BY c.embedding <=> %s::vector LIMIT %s"
+         params.extend([embedding, min(limit, config.max_limit)])
+
+         results = conn.execute(sql, params).fetchall()
+         return [dict(r) for r in results]
+
+     def keyword_search(
+         self,
+         query: str,
+         limit: int = 5,
+         source_type: str | None = None,
+         since: str | None = None,
+     ) -> list[dict]:
+         """
+         Full-text keyword search.
+
+         Better for exact matches, code symbols, function names.
+         """
+         conn = self.get_connection()
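+         # plainto_tsquery ANDs all terms together, so every keyword must match;
+         # ts_rank then scores matches by term frequency.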
+
+         sql = """
+             SELECT
+                 c.content,
+                 c.chunk_index,
+                 d.source_path,
+                 d.source_type,
+                 d.title,
+                 d.metadata as doc_metadata,
+                 ts_rank(to_tsvector('english', c.content), plainto_tsquery('english', %s)) as rank
+             FROM chunks c
+             JOIN documents d ON c.document_id = d.id
+             WHERE to_tsvector('english', c.content) @@ plainto_tsquery('english', %s)
+         """
+         params: list[Any] = [query, query]
+
+         if source_type:
+             sql += " AND d.source_type = %s"
+             params.append(source_type)
+
+         if since:
+             since_dt = parse_since_filter(since)
+             if since_dt:
+                 sql += """ AND COALESCE(
+                     (d.metadata->>'document_date')::timestamptz,
+                     (d.metadata->>'file_modified_at')::timestamptz
+                 ) >= %s"""
+                 params.append(since_dt)
+
+         sql += " ORDER BY rank DESC LIMIT %s"
+         params.append(min(limit, config.max_limit))
+
+         results = conn.execute(sql, params).fetchall()
+         return [dict(r) for r in results]
+
+     def hybrid_search(
+         self,
+         query: str,
+         limit: int = 5,
+         source_type: str | None = None,
+         semantic_weight: float = 0.7,
+         since: str | None = None,
+     ) -> list[dict]:
+         """
+         Hybrid search combining semantic and keyword results.
+
+         Uses Reciprocal Rank Fusion (RRF) to merge results.
+         """
+         # Get both result sets
+         semantic_results = self.semantic_search(
+             query, limit=limit * 2, source_type=source_type, since=since
+         )
+         keyword_results = self.keyword_search(
+             query, limit=limit * 2, source_type=source_type, since=since
+         )
+
+         # RRF scoring
+         k = 60  # RRF constant
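+         # Each list contributes weight / (k + rank + 1) per result; k = 60 is
+         # the constant customarily used for RRF and damps the advantage of a
+         # single top rank over a consistent mid-rank presence in both lists.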
+         scores: dict[str, float] = {}
+         results_map: dict[str, dict] = {}
+
+         for rank, r in enumerate(semantic_results):
+             key = f"{r['source_path']}:{r['chunk_index']}"
+             scores[key] = scores.get(key, 0) + semantic_weight / (k + rank + 1)
+             results_map[key] = r
+
+         for rank, r in enumerate(keyword_results):
+             key = f"{r['source_path']}:{r['chunk_index']}"
+             scores[key] = scores.get(key, 0) + (1 - semantic_weight) / (k + rank + 1)
+             if key not in results_map:
+                 results_map[key] = r
+
+         # Sort by combined score (avoid shadowing the RRF constant k)
+         sorted_keys = sorted(scores, key=lambda key: scores[key], reverse=True)
+
+         return [results_map[key] for key in sorted_keys[:limit]]
+
+     def list_sources(self) -> list[dict]:
+         """List all indexed sources with stats."""
+         conn = self.get_connection()
+         results = conn.execute("SELECT * FROM index_stats").fetchall()
+         return [dict(r) for r in results]
+
+     def list_projects(self) -> list[str]:
+         """List all known projects."""
+         conn = self.get_connection()
+         results = conn.execute("""
+             SELECT DISTINCT metadata->>'project' as project
+             FROM documents
+             WHERE metadata->>'project' IS NOT NULL
+             ORDER BY project
+         """).fetchall()
+         return [r["project"] for r in results]
+
+     def get_document(self, source_path: str) -> dict | None:
+         """Get full document content by path."""
+         conn = self.get_connection()
+         result = conn.execute(
+             "SELECT * FROM documents WHERE source_path = %s", (source_path,)
+         ).fetchone()
+         return dict(result) if result else None
+
+     def get_recent_documents(self, limit: int = 10) -> list[dict]:
+         """Get recently indexed documents."""
+         conn = self.get_connection()
+         results = conn.execute(
+             """
+             SELECT source_path, source_type, title, metadata, updated_at
+             FROM documents
+             ORDER BY updated_at DESC
+             LIMIT %s
+             """,
+             (limit,),
+         ).fetchall()
+         return [dict(r) for r in results]
+
+     def save_knowledge(
+         self,
+         title: str,
+         content: str,
+         tags: list[str] | None = None,
+         project: str | None = None,
+     ) -> dict:
+         """
+         Save a piece of knowledge directly from Claude.
+
+         Creates a virtual document (not file-backed) with embedding.
+         Returns the saved document info.
+         """
+         conn = self.get_connection()
+
+         # Generate unique source path for Claude-generated content
+         knowledge_id = str(uuid.uuid4())[:8]
+         timestamp = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
+         source_path = f"claude://knowledge/{timestamp}-{knowledge_id}"
+
+         # Build metadata
+         metadata = {}
+         if tags:
+             metadata["tags"] = tags
+         if project:
+             metadata["project"] = project
+         metadata["source"] = "claude"
+
+         # Content hash for deduplication
+         content_hash = hashlib.sha256(content.encode()).hexdigest()[:16]
+
+         # Check for duplicate content
+         existing = conn.execute(
+             "SELECT source_path, title FROM documents WHERE content_hash = %s",
+             (content_hash,),
+         ).fetchone()
+         if existing:
+             return {
+                 "status": "duplicate",
+                 "existing_path": existing["source_path"],
+                 "existing_title": existing["title"],
+             }
+
+         # Build contextual embedding text
+         embedding_parts = [f"Document: {title}"]
+         if project:
+             embedding_parts.append(f"Project: {project}")
+         if tags:
+             embedding_parts.append(f"Topics: {', '.join(tags)}")
+         embedding_parts.append(f"Content: {content}")
+         embedding_text = "\n".join(embedding_parts)
+
+         # Generate embedding
+         embedding = embed_document(embedding_text)
+
+         # Insert document
+         doc_id = conn.execute(
+             """
+             INSERT INTO documents (source_path, source_type, title, content, metadata, content_hash)
+             VALUES (%s, %s, %s, %s, %s, %s)
+             RETURNING id
+             """,
+             (
+                 source_path,
+                 "claude-note",
+                 title,
+                 content,
+                 psycopg.types.json.Json(metadata),
+                 content_hash,
+             ),
+         ).fetchone()["id"]
+
+         # Insert single chunk
+         token_count = len(content) // 4  # Approximate
+         conn.execute(
+             """
+             INSERT INTO chunks (document_id, chunk_index, content, embedding_text, embedding, token_count, metadata)
+             VALUES (%s, %s, %s, %s, %s, %s, %s)
+             """,
+             (
+                 doc_id,
+                 0,
+                 content,
+                 embedding_text,
+                 embedding,
+                 token_count,
+                 psycopg.types.json.Json({}),
+             ),
+         )
+
+         conn.commit()
+
+         return {
+             "status": "saved",
+             "source_path": source_path,
+             "title": title,
+             "token_count": token_count,
+         }
+
+     def delete_knowledge(self, source_path: str) -> bool:
+         """Delete a Claude-saved knowledge entry by source path."""
+         if not source_path.startswith("claude://"):
+             return False
+
+         conn = self.get_connection()
+         result = conn.execute(
+             "DELETE FROM documents WHERE source_path = %s RETURNING id",
+             (source_path,),
+         ).fetchone()
+         conn.commit()
+         return result is not None
+
+     def save_todo(
+         self,
+         title: str,
+         content: str | None = None,
+         due_date: str | None = None,
+         priority: str | None = None,
+         project: str | None = None,
+         tags: list[str] | None = None,
+     ) -> dict:
+         """
+         Create a TODO item in the knowledge base.
+
+         Args:
+             title: TODO item title
+             content: Optional description/notes
+             due_date: Due date (ISO date or 'today'/'tomorrow')
+             priority: Priority ('A'/'B'/'C' or 1-5, 1=highest)
+             project: Project name
+             tags: List of tags
+
+         Returns:
+             Dict with status and saved document info
+         """
+         conn = self.get_connection()
+
+         # Generate unique source path
+         todo_id = str(uuid.uuid4())[:8]
+         timestamp = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
+         source_path = f"claude://todo/{timestamp}-{todo_id}"
+
+         # Parse priority: A=1, B=2, C=3, or numeric 1-5
+         parsed_priority = None
+         if priority:
+             priority_map = {"A": 1, "B": 2, "C": 3, "a": 1, "b": 2, "c": 3}
+             if priority.upper() in priority_map:
+                 parsed_priority = priority_map[priority.upper()]
+             elif priority.isdigit() and 1 <= int(priority) <= 5:
+                 parsed_priority = int(priority)
+
+         # Parse due_date
+         parsed_due_date = None
+         if due_date:
+             date_range = parse_date_range(due_date)
+             if date_range:
+                 parsed_due_date = date_range[0]  # Use start of range
+             else:
+                 # Try ISO format
+                 try:
+                     parsed_due_date = datetime.fromisoformat(due_date.replace("Z", "+00:00"))
+                 except ValueError:
+                     pass
+
+         # Build metadata
+         metadata = {"source": "claude"}
+         if tags:
+             metadata["tags"] = tags
+         if project:
+             metadata["project"] = project
+
+         # Use content if provided, otherwise use title
+         doc_content = content if content else title
+
+         # Content hash for deduplication
+         content_hash = hashlib.sha256(f"{title}:{doc_content}".encode()).hexdigest()[:16]
+
+         # Build contextual embedding text
+         embedding_parts = [f"TODO: {title}"]
+         if project:
+             embedding_parts.append(f"Project: {project}")
+         if tags:
+             embedding_parts.append(f"Topics: {', '.join(tags)}")
+         if content:
+             embedding_parts.append(f"Details: {content}")
+         embedding_text = "\n".join(embedding_parts)
+
+         # Generate embedding
+         embedding = embed_document(embedding_text)
+
+         # Insert document with structured fields
+         doc_id = conn.execute(
+             """
+             INSERT INTO documents (
+                 source_path, source_type, title, content, metadata, content_hash,
+                 status, priority, due_date
+             )
+             VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
+             RETURNING id
+             """,
+             (
+                 source_path,
+                 "claude-todo",
+                 title,
+                 doc_content,
+                 psycopg.types.json.Json(metadata),
+                 content_hash,
+                 "pending",
+                 parsed_priority,
+                 parsed_due_date,
+             ),
+         ).fetchone()["id"]
+
+         # Insert single chunk
+         token_count = len(doc_content) // 4  # Approximate
+         conn.execute(
+             """
+             INSERT INTO chunks (document_id, chunk_index, content, embedding_text, embedding, token_count, metadata)
+             VALUES (%s, %s, %s, %s, %s, %s, %s)
+             """,
+             (
+                 doc_id,
+                 0,
+                 doc_content,
+                 embedding_text,
+                 embedding,
+                 token_count,
+                 psycopg.types.json.Json({}),
+             ),
+         )
+
+         conn.commit()
+
+         return {
+             "status": "saved",
+             "source_path": source_path,
+             "title": title,
+             "priority": parsed_priority,
+             "due_date": str(parsed_due_date) if parsed_due_date else None,
+         }
+
+     def get_database_metadata(self) -> dict:
+         """Get LLM-enhanced database metadata."""
+         conn = self.get_connection()
+         results = conn.execute("SELECT key, value, source FROM database_metadata").fetchall()
+         return {r["key"]: {"value": r["value"], "source": r["source"]} for r in results}
+
+     def set_database_metadata(self, key: str, value: Any) -> bool:
+         """Set or update LLM-enhanced database metadata."""
+         conn = self.get_connection()
+         conn.execute(
+             """
+             INSERT INTO database_metadata (key, value, source, updated_at)
+             VALUES (%s, %s, 'llm', NOW())
+             ON CONFLICT (key) DO UPDATE SET
+                 value = EXCLUDED.value,
+                 source = 'llm',
+                 updated_at = NOW()
+             """,
+             (key, psycopg.types.json.Json(value)),
+         )
+         conn.commit()
+         return True
+
+     def get_actionable_items(
+         self,
+         item_type: str | None = None,
+         status: str | None = None,
+         due_date: str | None = None,
+         event_date: str | None = None,
+         min_priority: int | None = None,
+         limit: int = 20,
+     ) -> list[dict]:
+         """
+         Query actionable items (tasks, events, emails) with structured filters.
+
+         Args:
+             item_type: Filter by source_type (e.g., 'todoist-task', 'gcal-event', 'gmail')
+             status: Filter by status ('pending', 'completed', etc.)
+             due_date: Filter tasks due on date ('today', 'tomorrow', 'this_week', 'YYYY-MM-DD')
+             event_date: Filter events on date ('today', 'tomorrow', 'this_week', 'YYYY-MM-DD')
+             min_priority: Filter items with priority <= this value (1=highest)
+             limit: Max results to return
+         """
+         conn = self.get_connection()
+
+         sql = """
+             SELECT
+                 d.source_path,
+                 d.source_type,
+                 d.title,
+                 d.content,
+                 d.metadata,
+                 d.due_date,
+                 d.event_start,
+                 d.event_end,
+                 d.status,
+                 d.priority
+             FROM documents d
+             WHERE 1=1
+         """
+         params: list[Any] = []
+
+         if item_type:
+             sql += " AND d.source_type = %s"
+             params.append(item_type)
+
+         if status:
+             sql += " AND d.status = %s"
+             params.append(status)
+
+         if due_date:
+             date_range = parse_date_range(due_date)
+             if date_range:
+                 sql += " AND d.due_date >= %s AND d.due_date < %s"
+                 params.extend(date_range)
+
+         if event_date:
+             date_range = parse_date_range(event_date)
+             if date_range:
+                 # Event overlaps with date range
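+                 # (interval-overlap test: event_start < range_end AND event_end > range_start;
+                 # a NULL event_end is treated as an ongoing event)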
+                 sql += " AND d.event_start < %s AND (d.event_end > %s OR d.event_end IS NULL)"
+                 params.extend([date_range[1], date_range[0]])
+
+         if min_priority is not None:
+             sql += " AND d.priority IS NOT NULL AND d.priority <= %s"
+             params.append(min_priority)
+
+         # Order by: due_date/event_start (soonest first), then priority
+         sql += """
+             ORDER BY
+                 COALESCE(d.due_date, d.event_start) ASC NULLS LAST,
+                 d.priority ASC NULLS LAST
+             LIMIT %s
+         """
+         params.append(min(limit, config.max_limit))
+
+         results = conn.execute(sql, params).fetchall()
+         return [dict(r) for r in results]
+
+
+ def build_server_instructions(db_config) -> str | None:
+     """Build server instructions from database config and LLM metadata."""
+     parts = []
+     if db_config.description:
+         parts.append(db_config.description)
+     if db_config.topics:
+         parts.append(f"Topics: {', '.join(db_config.topics)}")
+     return " ".join(parts) if parts else None
+
+
+ # Initialize server and knowledge base
+ server = Server("knowledge-base")
+ kb: KnowledgeBase | None = None
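+ # kb stays None until main() constructs it, before the stdio loop starts
+ # handling tool calls.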
+
+
+ @server.list_tools()
+ async def list_tools() -> list[Tool]:
+     """Define available tools for Claude Code."""
+     return [
+         Tool(
+             name="search_knowledge",
+             description=(
+                 "Search the personal knowledge base for relevant information using semantic search. "
+                 "Use this for finding notes, code snippets, documentation, or any previously indexed content. "
+                 "Supports natural language queries - describe what you're looking for."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "query": {
+                         "type": "string",
+                         "description": "Natural language search query describing what you're looking for",
+                     },
+                     "limit": {
+                         "type": "integer",
+                         "description": "Maximum results to return (default: 5, max: 20)",
+                         "default": 5,
+                     },
+                     "source_type": {
+                         "type": "string",
+                         "enum": ["markdown", "code"],
+                         "description": "Filter by source type (optional)",
+                     },
+                     "project": {
+                         "type": "string",
+                         "description": "Filter by project name (optional)",
+                     },
+                     "since": {
+                         "type": "string",
+                         "description": "Filter to documents modified since (ISO date or relative: '7d', '30d', '6mo')",
+                     },
+                 },
+                 "required": ["query"],
+             },
+         ),
+         Tool(
+             name="keyword_search",
+             description=(
+                 "Search by exact keywords using full-text search. "
+                 "Better for code symbols, function names, class names, or specific terms "
+                 "that semantic search might miss."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "query": {
+                         "type": "string",
+                         "description": "Keywords to search for (e.g., 'select_related prefetch')",
+                     },
+                     "limit": {
+                         "type": "integer",
+                         "description": "Maximum results (default: 5)",
+                         "default": 5,
+                     },
+                     "source_type": {
+                         "type": "string",
+                         "enum": ["markdown", "code"],
+                         "description": "Filter by source type (optional)",
+                     },
+                     "since": {
+                         "type": "string",
+                         "description": "Filter to documents modified since (ISO date or relative: '7d', '30d', '6mo')",
+                     },
+                 },
+                 "required": ["query"],
+             },
+         ),
+         Tool(
+             name="hybrid_search",
+             description=(
+                 "Combined semantic and keyword search using Reciprocal Rank Fusion. "
+                 "Use this when you want the best of both approaches - semantic understanding "
+                 "plus exact matching."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "query": {
+                         "type": "string",
+                         "description": "Search query",
+                     },
+                     "limit": {
+                         "type": "integer",
+                         "description": "Maximum results (default: 5)",
+                         "default": 5,
+                     },
+                     "source_type": {
+                         "type": "string",
+                         "enum": ["markdown", "code"],
+                         "description": "Filter by source type (optional)",
+                     },
+                     "since": {
+                         "type": "string",
+                         "description": "Filter to documents modified since (ISO date or relative: '7d', '30d', '6mo')",
+                     },
+                 },
+                 "required": ["query"],
+             },
+         ),
+         Tool(
+             name="get_document",
+             description=(
+                 "Retrieve the full content of a specific document by its source path. "
+                 "Use after finding relevant chunks to get complete context."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "source_path": {
+                         "type": "string",
+                         "description": "Absolute path to the document (from search results)",
+                     },
+                 },
+                 "required": ["source_path"],
+             },
+         ),
+         Tool(
+             name="list_sources",
+             description="List all indexed source types with document and chunk counts.",
+             inputSchema={
+                 "type": "object",
+                 "properties": {},
+             },
+         ),
+         Tool(
+             name="list_projects",
+             description="List all known project names in the knowledge base.",
+             inputSchema={
+                 "type": "object",
+                 "properties": {},
+             },
+         ),
+         Tool(
+             name="recent_documents",
+             description="Get recently indexed or updated documents.",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "limit": {
+                         "type": "integer",
+                         "description": "Number of documents (default: 10)",
+                         "default": 10,
+                     },
+                 },
+             },
+         ),
+         Tool(
+             name="save_knowledge",
+             description=(
+                 "Save a piece of knowledge to the knowledge base for future reference. "
+                 "Use this to remember solutions, patterns, debugging tips, architectural decisions, "
+                 "or any useful information discovered during this conversation. "
+                 "The knowledge will be searchable in future sessions."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "title": {
+                         "type": "string",
+                         "description": "Short descriptive title for this knowledge",
+                     },
+                     "content": {
+                         "type": "string",
+                         "description": "The knowledge content to save",
+                     },
+                     "tags": {
+                         "type": "array",
+                         "items": {"type": "string"},
+                         "description": "Categorization tags (e.g., ['python', 'debugging', 'django'])",
+                     },
+                     "project": {
+                         "type": "string",
+                         "description": "Associated project name (optional)",
+                     },
+                 },
+                 "required": ["title", "content"],
+             },
+         ),
+         Tool(
+             name="delete_knowledge",
+             description=(
+                 "Delete a previously saved knowledge entry by its source path. "
+                 "Only works for Claude-saved entries (claude:// paths)."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "source_path": {
+                         "type": "string",
+                         "description": "The source path of the knowledge entry to delete",
+                     },
+                 },
+                 "required": ["source_path"],
+             },
+         ),
+         Tool(
+             name="get_actionable_items",
+             description=(
+                 "Query actionable items like tasks, calendar events, and emails "
+                 "with structured filters. Use this for daily briefs, finding tasks due soon, "
+                 "or checking today's schedule. Filters by status, due date, event date, priority."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "item_type": {
+                         "type": "string",
+                         "description": (
+                             "Filter by source type (e.g., 'todoist-task', 'gcal-event')"
+                         ),
+                     },
+                     "status": {
+                         "type": "string",
+                         "description": "Filter by status ('pending', 'completed', 'cancelled')",
+                     },
+                     "due_date": {
+                         "type": "string",
+                         "description": (
+                             "Filter tasks by due date: 'today', 'tomorrow', 'this_week', "
+                             "'next_week', or 'YYYY-MM-DD'"
+                         ),
+                     },
+                     "event_date": {
+                         "type": "string",
+                         "description": (
+                             "Filter events by date: 'today', 'tomorrow', 'this_week', "
+                             "'next_week', or 'YYYY-MM-DD'"
+                         ),
+                     },
+                     "min_priority": {
+                         "type": "integer",
+                         "description": (
+                             "Filter by priority (1=highest). Returns items <= this value."
+                         ),
+                         "minimum": 1,
+                         "maximum": 5,
+                     },
+                     "limit": {
+                         "type": "integer",
+                         "description": "Maximum results to return (default: 20)",
+                         "default": 20,
+                     },
+                 },
+             },
+         ),
+         Tool(
+             name="get_database_info",
+             description=(
+                 "Get information about this knowledge base including its description, topics, "
+                 "and content statistics. Call this at the start of a session to understand what's "
+                 "available. If the description/topics are empty or seem outdated, you SHOULD "
+                 "explore the database (list_sources, recent_documents, sample searches) and call "
+                 "set_database_description to document it for future sessions."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {},
+             },
+         ),
+         Tool(
+             name="set_database_description",
+             description=(
+                 "Update the knowledge base description and topics based on your analysis of "
+                 "its contents. Use this after exploring the database to help future sessions "
+                 "understand what kind of information is stored here. Describe the content and "
+                 "purpose, not just stats. Good: 'Django backend for education platform with "
+                 "student enrollment and grading'. Bad: '2500 code files, 63 markdown docs'."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "description": {
+                         "type": "string",
+                         "description": (
+                             "A concise description of what this knowledge base contains "
+                             "(1-3 sentences, e.g., 'Personal notes on farming, including crop "
+                             "planning, livestock management, and equipment maintenance')"
+                         ),
+                     },
+                     "topics": {
+                         "type": "array",
+                         "items": {"type": "string"},
+                         "description": (
+                             "List of topic keywords that characterize the content "
+                             "(e.g., ['farming', 'crops', 'livestock', 'equipment'])"
+                         ),
+                     },
+                 },
+             },
+         ),
+         Tool(
+             name="add_todo",
+             description=(
+                 "Create a TODO item in the knowledge base. Use this to capture tasks, "
+                 "action items, or reminders that come up during conversation. "
+                 "The TODO will be queryable via get_actionable_items."
+             ),
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "title": {
+                         "type": "string",
+                         "description": "TODO item title",
+                     },
+                     "content": {
+                         "type": "string",
+                         "description": "Optional description or notes",
+                     },
+                     "due_date": {
+                         "type": "string",
+                         "description": ("Due date: ISO date (YYYY-MM-DD), 'today', or 'tomorrow'"),
+                     },
+                     "priority": {
+                         "type": "string",
+                         "description": "Priority: 'A'/'B'/'C' or 1-5 (1=highest)",
+                     },
+                     "project": {
+                         "type": "string",
+                         "description": "Project name",
+                     },
+                     "tags": {
+                         "type": "array",
+                         "items": {"type": "string"},
+                         "description": "Categorization tags",
+                     },
+                 },
+                 "required": ["title"],
+             },
+         ),
+     ]
+
+
+ def format_search_results(results: list[dict], show_similarity: bool = True) -> str:
+     """Format search results for display."""
+     if not results:
+         return "No relevant results found."
+
+     output = []
+     for r in results:
+         header = f"## {r['title']} ({r['source_type']})"
+         source = f"**Source:** `{r['source_path']}`"
+
+         # Add document date if available
+         date_line = ""
+         if doc_meta := r.get("doc_metadata"):
+             if doc_date := get_document_date(doc_meta):
+                 date_line = f"\n**Modified:** {format_relative_time(doc_date)}"
+
+         if show_similarity and "similarity" in r:
+             score = f"**Relevance:** {r['similarity']:.1%}"
+             output.append(f"{header}\n{source}\n{score}{date_line}\n\n{r['content']}\n\n---")
+         else:
+             # Ranked (keyword) and unranked results share the same layout
+             output.append(f"{header}\n{source}{date_line}\n\n{r['content']}\n\n---")
+
+     return "\n\n".join(output)
+
+
+ def format_actionable_items(items: list[dict]) -> str:
+     """Format actionable items (tasks, events, emails) for display."""
+     if not items:
+         return "No actionable items found matching the criteria."
+
+     output = ["## Actionable Items\n"]
+
+     for item in items:
+         title = item.get("title") or "Untitled"
+         source_type = item.get("source_type", "unknown")
+         status = item.get("status")
+         priority = item.get("priority")
+
+         # Build header with status and priority indicators
+         status_icon = {"pending": "[ ]", "completed": "[x]", "cancelled": "[-]"}.get(
+             status or "", "[ ]"
+         )
+         priority_str = f" P{priority}" if priority else ""
+         header = f"{status_icon} **{title}**{priority_str} ({source_type})"
+
+         # Build date info
+         date_parts = []
+         if due := item.get("due_date"):
+             date_parts.append(f"Due: {format_relative_time(str(due))}")
+         if start := item.get("event_start"):
+             if end := item.get("event_end"):
+                 date_parts.append(f"Event: {start} - {end}")
+             else:
+                 date_parts.append(f"Event: {start}")
+         date_line = " | ".join(date_parts) if date_parts else ""
+
+         # Content preview (truncate if long)
+         content = item.get("content", "")
+         if len(content) > 200:
+             content = content[:200] + "..."
+
+         parts = [header]
+         if date_line:
+             parts.append(date_line)
+         if content:
+             parts.append(content)
+         parts.append(f"`{item.get('source_path', '')}`")
+         parts.append("---")
+
+         output.append("\n".join(parts))
+
+     return "\n\n".join(output)
+
+
+ @server.call_tool()
+ async def call_tool(name: str, arguments: dict[str, Any]) -> CallToolResult:
+     """Handle tool invocations from Claude Code."""
+     try:
+         if name == "search_knowledge":
+             results = kb.semantic_search(
+                 query=arguments["query"],
+                 limit=arguments.get("limit", 5),
+                 source_type=arguments.get("source_type"),
+                 project=arguments.get("project"),
+                 since=arguments.get("since"),
+             )
+             return CallToolResult(
+                 content=[TextContent(type="text", text=format_search_results(results))]
+             )
+
+         elif name == "keyword_search":
+             results = kb.keyword_search(
+                 query=arguments["query"],
+                 limit=arguments.get("limit", 5),
+                 source_type=arguments.get("source_type"),
+                 since=arguments.get("since"),
+             )
+             return CallToolResult(
+                 content=[
+                     TextContent(
+                         type="text", text=format_search_results(results, show_similarity=False)
+                     )
+                 ]
+             )
+
+         elif name == "hybrid_search":
+             results = kb.hybrid_search(
+                 query=arguments["query"],
+                 limit=arguments.get("limit", 5),
+                 source_type=arguments.get("source_type"),
+                 since=arguments.get("since"),
+             )
+             return CallToolResult(
+                 content=[
+                     TextContent(
+                         type="text", text=format_search_results(results, show_similarity=False)
+                     )
+                 ]
+             )
+
+         elif name == "get_document":
+             doc = kb.get_document(arguments["source_path"])
+             if not doc:
+                 return CallToolResult(
+                     content=[TextContent(type="text", text="Document not found.")]
+                 )
+             return CallToolResult(
+                 content=[TextContent(type="text", text=f"# {doc['title']}\n\n{doc['content']}")]
+             )
+
+         elif name == "list_sources":
+             sources = kb.list_sources()
+             if not sources:
+                 return CallToolResult(
+                     content=[TextContent(type="text", text="No documents indexed yet.")]
+                 )
+             output = ["## Indexed Sources\n"]
+             for s in sources:
+                 tokens = s.get("total_tokens") or 0
+                 output.append(
+                     f"- **{s['source_type']}**: {s['document_count']} documents, "
+                     f"{s['chunk_count']} chunks (~{tokens:,} tokens)"
+                 )
+             return CallToolResult(content=[TextContent(type="text", text="\n".join(output))])
+
+         elif name == "list_projects":
+             projects = kb.list_projects()
+             if not projects:
+                 return CallToolResult(content=[TextContent(type="text", text="No projects found.")])
+             return CallToolResult(
+                 content=[
+                     TextContent(
+                         type="text", text="## Projects\n\n" + "\n".join(f"- {p}" for p in projects)
+                     )
+                 ]
+             )
+
+         elif name == "recent_documents":
+             docs = kb.get_recent_documents(arguments.get("limit", 10))
+             if not docs:
+                 return CallToolResult(
+                     content=[TextContent(type="text", text="No documents indexed yet.")]
+                 )
+             output = ["## Recent Documents\n"]
+             for d in docs:
+                 project = d["metadata"].get("project", "")
+                 project_str = f" [{project}]" if project else ""
+                 date_str = ""
+                 if doc_date := get_document_date(d["metadata"]):
+                     date_str = f" - {format_relative_time(doc_date)}"
+                 output.append(f"- **{d['title']}**{project_str} ({d['source_type']}){date_str}")
+                 output.append(f"  `{d['source_path']}`")
+             return CallToolResult(content=[TextContent(type="text", text="\n".join(output))])
+
+         elif name == "save_knowledge":
+             result = kb.save_knowledge(
+                 title=arguments["title"],
+                 content=arguments["content"],
+                 tags=arguments.get("tags"),
+                 project=arguments.get("project"),
+             )
+             if result["status"] == "duplicate":
+                 return CallToolResult(
+                     content=[
+                         TextContent(
+                             type="text",
+                             text=(
+                                 f"Duplicate content already exists:\n"
+                                 f"- Title: {result['existing_title']}\n"
+                                 f"- Path: `{result['existing_path']}`"
+                             ),
+                         )
+                     ]
+                 )
+             return CallToolResult(
+                 content=[
+                     TextContent(
+                         type="text",
+                         text=(
+                             f"Knowledge saved successfully:\n"
+                             f"- Title: {result['title']}\n"
+                             f"- Path: `{result['source_path']}`\n"
+                             f"- Tokens: ~{result['token_count']}"
+                         ),
+                     )
+                 ]
+             )
+
+         elif name == "delete_knowledge":
+             deleted = kb.delete_knowledge(arguments["source_path"])
+             if deleted:
+                 return CallToolResult(
+                     content=[TextContent(type="text", text="Knowledge entry deleted.")]
+                 )
+             return CallToolResult(
+                 content=[
+                     TextContent(
+                         type="text",
+                         text="Could not delete. Entry not found or not a Claude-saved entry.",
+                     )
+                 ]
+             )
+
+         elif name == "get_actionable_items":
+             items = kb.get_actionable_items(
+                 item_type=arguments.get("item_type"),
+                 status=arguments.get("status"),
+                 due_date=arguments.get("due_date"),
+                 event_date=arguments.get("event_date"),
+                 min_priority=arguments.get("min_priority"),
+                 limit=arguments.get("limit", 20),
+             )
+             return CallToolResult(
+                 content=[TextContent(type="text", text=format_actionable_items(items))]
+             )
+
+         elif name == "get_database_info":
+             # Get config-based info
+             db_config = config.get_database()
+             info_parts = ["## Knowledge Base Info\n"]
+
+             # Config-defined description/topics
+             if db_config.description:
+                 info_parts.append(f"**Description (config):** {db_config.description}")
+             if db_config.topics:
+                 info_parts.append(f"**Topics (config):** {', '.join(db_config.topics)}")
+
+             # LLM-enhanced metadata
+             llm_desc = None
+             llm_topics = None
+             try:
+                 metadata = kb.get_database_metadata()
+                 llm_desc = metadata.get("llm_description", {}).get("value")
+                 llm_topics = metadata.get("llm_topics", {}).get("value")
+                 if llm_desc:
+                     info_parts.append(f"**Description (LLM-enhanced):** {llm_desc}")
+                 if llm_topics:
+                     info_parts.append(f"**Topics (LLM-enhanced):** {', '.join(llm_topics)}")
+             except Exception:
+                 pass  # Table may not exist yet
+
+             # Content stats
+             sources = kb.list_sources()
+             if sources:
+                 info_parts.append("\n### Content Statistics")
+                 for s in sources:
+                     tokens = s.get("total_tokens") or 0
+                     info_parts.append(
+                         f"- **{s['source_type']}**: {s['document_count']} documents, "
+                         f"{s['chunk_count']} chunks (~{tokens:,} tokens)"
+                     )
+
+             # Projects
+             projects = kb.list_projects()
+             if projects:
+                 info_parts.append(f"\n### Projects\n{', '.join(projects)}")
+
+             # Hint if no description exists
+             if not db_config.description and not llm_desc:
+                 info_parts.append(
+                     "\n**Note:** No description set. Consider exploring with "
+                     "recent_documents and search_knowledge, then calling "
+                     "set_database_description to document what's here."
+                 )
+
+             return CallToolResult(content=[TextContent(type="text", text="\n".join(info_parts))])
+
+         elif name == "set_database_description":
+             updated = []
+             if "description" in arguments:
+                 kb.set_database_metadata("llm_description", arguments["description"])
+                 updated.append("description")
+             if "topics" in arguments:
+                 kb.set_database_metadata("llm_topics", arguments["topics"])
+                 updated.append("topics")
+             if updated:
+                 return CallToolResult(
+                     content=[
+                         TextContent(
+                             type="text",
+                             text=f"Updated database metadata: {', '.join(updated)}",
+                         )
+                     ]
+                 )
+             return CallToolResult(
+                 content=[TextContent(type="text", text="No fields provided to update.")]
+             )
+
+         elif name == "add_todo":
+             result = kb.save_todo(
+                 title=arguments["title"],
+                 content=arguments.get("content"),
+                 due_date=arguments.get("due_date"),
+                 priority=arguments.get("priority"),
+                 project=arguments.get("project"),
+                 tags=arguments.get("tags"),
+             )
+             parts = [
+                 "TODO created:",
+                 f"- Title: {result['title']}",
+                 f"- Path: `{result['source_path']}`",
+             ]
+             if result.get("priority"):
+                 parts.append(f"- Priority: P{result['priority']}")
+             if result.get("due_date"):
+                 parts.append(f"- Due: {result['due_date']}")
+             return CallToolResult(content=[TextContent(type="text", text="\n".join(parts))])
+
+         else:
+             return CallToolResult(content=[TextContent(type="text", text=f"Unknown tool: {name}")])
+
+     except Exception as e:
+         return CallToolResult(content=[TextContent(type="text", text=f"Error: {e!s}")])
+
+
+ async def main(db_url: str | None = None, db_name: str | None = None):
+     """Run the MCP server."""
+     global kb
+
+     # Get database config
+     db_config = config.get_database(db_name)
+
+     # Initialize knowledge base with provided URL or from config
+     if db_url is None:
+         db_url = db_config.url
+     kb = KnowledgeBase(db_url)
+
+     # Set server instructions from config
+     server.instructions = build_server_instructions(db_config)
+
+     # Pre-warm embedding model
+     print("Warming up embedding model...", file=sys.stderr)
+     warmup()
+     print("Ready.", file=sys.stderr)
+
+     async with stdio_server() as (read_stream, write_stream):
+         await server.run(read_stream, write_stream, server.create_initialization_options())
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())