roampal 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. roampal/__init__.py +29 -0
  2. roampal/__main__.py +6 -0
  3. roampal/backend/__init__.py +1 -0
  4. roampal/backend/modules/__init__.py +1 -0
  5. roampal/backend/modules/memory/__init__.py +43 -0
  6. roampal/backend/modules/memory/chromadb_adapter.py +623 -0
  7. roampal/backend/modules/memory/config.py +102 -0
  8. roampal/backend/modules/memory/content_graph.py +543 -0
  9. roampal/backend/modules/memory/context_service.py +455 -0
  10. roampal/backend/modules/memory/embedding_service.py +96 -0
  11. roampal/backend/modules/memory/knowledge_graph_service.py +1052 -0
  12. roampal/backend/modules/memory/memory_bank_service.py +433 -0
  13. roampal/backend/modules/memory/memory_types.py +296 -0
  14. roampal/backend/modules/memory/outcome_service.py +400 -0
  15. roampal/backend/modules/memory/promotion_service.py +473 -0
  16. roampal/backend/modules/memory/routing_service.py +444 -0
  17. roampal/backend/modules/memory/scoring_service.py +324 -0
  18. roampal/backend/modules/memory/search_service.py +646 -0
  19. roampal/backend/modules/memory/tests/__init__.py +1 -0
  20. roampal/backend/modules/memory/tests/conftest.py +12 -0
  21. roampal/backend/modules/memory/tests/unit/__init__.py +1 -0
  22. roampal/backend/modules/memory/tests/unit/conftest.py +7 -0
  23. roampal/backend/modules/memory/tests/unit/test_knowledge_graph_service.py +517 -0
  24. roampal/backend/modules/memory/tests/unit/test_memory_bank_service.py +504 -0
  25. roampal/backend/modules/memory/tests/unit/test_outcome_service.py +485 -0
  26. roampal/backend/modules/memory/tests/unit/test_scoring_service.py +255 -0
  27. roampal/backend/modules/memory/tests/unit/test_search_service.py +413 -0
  28. roampal/backend/modules/memory/tests/unit/test_unified_memory_system.py +418 -0
  29. roampal/backend/modules/memory/unified_memory_system.py +1277 -0
  30. roampal/cli.py +638 -0
  31. roampal/hooks/__init__.py +16 -0
  32. roampal/hooks/session_manager.py +587 -0
  33. roampal/hooks/stop_hook.py +176 -0
  34. roampal/hooks/user_prompt_submit_hook.py +103 -0
  35. roampal/mcp/__init__.py +7 -0
  36. roampal/mcp/server.py +611 -0
  37. roampal/server/__init__.py +7 -0
  38. roampal/server/main.py +744 -0
  39. roampal-0.1.4.dist-info/METADATA +179 -0
  40. roampal-0.1.4.dist-info/RECORD +44 -0
  41. roampal-0.1.4.dist-info/WHEEL +5 -0
  42. roampal-0.1.4.dist-info/entry_points.txt +2 -0
  43. roampal-0.1.4.dist-info/licenses/LICENSE +190 -0
  44. roampal-0.1.4.dist-info/top_level.txt +1 -0
roampal/mcp/server.py ADDED
@@ -0,0 +1,611 @@
+ """
+ Roampal MCP Server - Persistent Memory for AI Coding Tools
+
+ SETUP INSTRUCTIONS (tell user if they ask "how do I set up roampal"):
+ 1. pip install roampal
+ 2. roampal init # Configures Claude Code / Cursor automatically
+ 3. (MCP server auto-starts FastAPI hook server - no manual start needed)
+ 4. (Optional) roampal ingest myfile.pdf # Add documents to books collection
+
+ CLI COMMANDS:
+ - roampal init: Auto-configure Claude Code hooks + MCP, or Cursor MCP
+ - roampal ingest <file>: Ingest .txt/.md/.pdf into books collection
+ - roampal status: Check if server is running
+ - roampal stats: Show memory statistics
+
+ MCP TOOLS (available after setup):
+ - get_context_insights: Get context before responding (user profile, relevant memories)
+ - search_memory: Search across memory collections (for detailed lookups)
+ - add_to_memory_bank: Store permanent user facts
+ - update_memory: Update existing memories
+ - archive_memory: Archive outdated memories
+ - record_response: Complete the interaction (key_takeaway + outcome scoring)
+
+ WORKFLOW:
+ 1. get_context_insights(query) - Get what you know about this topic
+ 2. search_memory() - If you need more details
+ 3. Respond to user
+ 4. record_response(key_takeaway, initial_score) - Close the loop for learning
+
+ HOW IT WORKS:
+ - MCP server auto-starts FastAPI hook server on port 27182 (background subprocess)
+ - Hooks auto-inject relevant memories into your context (invisible to user)
+ - Cold start: First message of session dumps full user profile
+ - Scoring: score_response scores cached memories based on outcome
+ - Learning: Good memories get promoted, bad ones get demoted/deleted
+ - 5 collections: books (docs), memory_bank (facts), patterns (proven), history (past), working (session)
+ """
+
+ import logging
+ import json
+ import asyncio
+ import threading
+ import socket
+ from datetime import datetime
+ from typing import Optional, Dict, Any, List
+
+ from mcp.server import Server
+ from mcp.server.stdio import stdio_server
+ from mcp import types
+
+ from roampal.backend.modules.memory import UnifiedMemorySystem
+
+ logger = logging.getLogger(__name__)
+
+ # Global memory system
+ _memory: Optional[UnifiedMemorySystem] = None
+
+ # Session cache for outcome tracking
+ _mcp_search_cache: Dict[str, Dict[str, Any]] = {}
+
+ # Flag to track if FastAPI server is running
+ _fastapi_started = False
+
+
+ def _is_port_in_use(port: int) -> bool:
+     """Check if a port is already in use."""
+     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+         try:
+             s.bind(("127.0.0.1", port))
+             return False
+         except socket.error:
+             return True
+
+
+ def _start_fastapi_server():
+     """
+     Start FastAPI hook server in background subprocess.
+
+     This enables hooks to work without requiring a separate 'roampal start' command.
+     When Claude Code launches the MCP server, the hook server starts automatically.
+
+     Uses subprocess instead of threading to avoid event loop conflicts between
+     uvicorn and the MCP server's asyncio loop.
+     """
+     global _fastapi_started
+
+     if _fastapi_started:
+         return
+
+     # Check if port is already in use (server already running externally)
+     if _is_port_in_use(27182):
+         logger.info("FastAPI hook server already running on port 27182")
+         _fastapi_started = True
+         return
+
+     import subprocess
+     import sys
+
+     try:
+         # Start FastAPI server as a subprocess
+         # Use the same Python that's running this MCP server
+         subprocess.Popen(
+             [sys.executable, "-m", "roampal.server.main"],
+             stdout=subprocess.DEVNULL,
+             stderr=subprocess.DEVNULL,
+             # Don't inherit stdin (MCP uses it)
+             stdin=subprocess.DEVNULL,
+             # Detach from parent process group on Windows
+             creationflags=subprocess.CREATE_NO_WINDOW if sys.platform == "win32" else 0,
+         )
+         _fastapi_started = True
+         logger.info("Started FastAPI hook server on port 27182 (subprocess)")
+     except Exception as e:
+         logger.error(f"Failed to start FastAPI hook server: {e}")
+
+
+ def _detect_mcp_client() -> str:
+     """Detect MCP client for session tracking."""
+     # Use "default" to match the hook's fallback conversation_id
+     # This ensures MCP tool calls and hook injections share the same cache
+     return "default"
+
+
+ async def _initialize_memory():
+     """Initialize memory system if needed."""
+     global _memory
+     if _memory is None:
+         _memory = UnifiedMemorySystem()
+         await _memory.initialize()
+         logger.info("MCP: Memory system initialized")
+
+
+ def run_mcp_server():
+     """Run the MCP server (auto-starts FastAPI hook server)."""
+     # Start FastAPI hook server in background subprocess
+     _start_fastapi_server()
+
+     server = Server("roampal")
+
+     @server.list_tools()
+     async def list_tools() -> list[types.Tool]:
+         """List available MCP tools."""
+         return [
+             types.Tool(
+                 name="search_memory",
+                 description="""Search your persistent memory. Use when you need details beyond what get_context_insights returned.
+
+ WHEN TO SEARCH:
+ • User says "remember", "I told you", "we discussed" → search immediately
+ • get_context_insights recommended a collection → search that collection
+ • You need more detail than the context provided
+
+ WHEN NOT TO SEARCH:
+ • General knowledge questions (use your training)
+ • get_context_insights already gave you the answer
+
+ Collections: memory_bank (user facts), books (docs), patterns (proven solutions), history (past), working (recent)
+ Omit 'collections' parameter for auto-routing (recommended).""",
+                 inputSchema={
+                     "type": "object",
+                     "properties": {
+                         "query": {
+                             "type": "string",
+                             "description": "Search query - use the user's EXACT words/phrases, do NOT simplify or extract keywords"
+                         },
+                         "collections": {
+                             "type": "array",
+                             "items": {"type": "string", "enum": ["books", "working", "history", "patterns", "memory_bank"]},
+                             "description": "Which collections to search. Omit for auto-routing (recommended). Manual: books, working, history, patterns, memory_bank",
+                             "default": None
+                         },
+                         "limit": {
+                             "type": "integer",
+                             "description": "Number of results (1-20)",
+                             "default": 5,
+                             "minimum": 1,
+                             "maximum": 20
+                         },
+                         "metadata": {
+                             "type": "object",
+                             "description": "Optional filters. Use sparingly. Examples: timestamp='2025-11-12', last_outcome='worked', has_code=true",
+                             "additionalProperties": True
+                         }
+                     },
+                     "required": ["query"]
+                 }
+             ),
+             types.Tool(
+                 name="add_to_memory_bank",
+                 description="""Store PERMANENT facts that help maintain continuity across sessions.
+
+ WHAT BELONGS HERE:
+ • User identity (name, role, background)
+ • Preferences (communication style, tools, workflows)
+ • Goals and projects (what they're working on, priorities)
+ • Progress tracking (what worked, what failed, strategy iterations)
+ • Useful context that would be lost between sessions
+
+ WHAT DOES NOT BELONG:
+ • Raw conversation exchanges (auto-captured in working/history)
+ • Temporary session facts (current task details)
+ • Every fact you hear - be SELECTIVE, this is for PERMANENT knowledge
+
+ Rule of thumb: If it helps maintain continuity across sessions OR enables learning/improvement, store it. If it's session-specific, don't.
+
+ Note: memory_bank is NOT outcome-scored. Facts persist until archived.""",
+                 inputSchema={
+                     "type": "object",
+                     "properties": {
+                         "content": {"type": "string", "description": "The fact to remember"},
+                         "tags": {"type": "array", "items": {"type": "string"}, "description": "Categories: identity, preference, goal, project, system_mastery, agent_growth"},
+                         "importance": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.7, "description": "How critical (0.0-1.0)"},
+                         "confidence": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.7, "description": "How certain (0.0-1.0)"}
+                     },
+                     "required": ["content"]
+                 }
+             ),
+             types.Tool(
+                 name="update_memory",
+                 description="Update existing memory when information changes or needs correction.",
+                 inputSchema={
+                     "type": "object",
+                     "properties": {
+                         "old_content": {"type": "string", "description": "Old/incorrect fact to find"},
+                         "new_content": {"type": "string", "description": "Corrected/updated fact"}
+                     },
+                     "required": ["old_content", "new_content"]
+                 }
+             ),
+             types.Tool(
+                 name="archive_memory",
+                 description="Archive outdated/irrelevant memories from memory_bank.",
+                 inputSchema={
+                     "type": "object",
+                     "properties": {
+                         "content": {"type": "string", "description": "Memory to archive (semantic match)"}
+                     },
+                     "required": ["content"]
+                 }
+             ),
+             types.Tool(
+                 name="score_response",
+                 description="""Score the previous exchange. ONLY use when the <roampal-score-required> hook prompt appears.
+
+ ⚠️ IMPORTANT: This tool is ONLY for scoring when prompted by the hook. Do NOT call it at other times.
+ For storing important learnings at any time, use record_response(key_takeaway="...") instead.
+
+ OUTCOME DETECTION (read user's reaction):
+ ✓ worked = user satisfied, says thanks, moves on
+ ✗ failed = user corrects you, says "no", "that's wrong", provides the right answer
+ ~ partial = user says "kind of" or takes some but not all of your answer
+ ? unknown = no clear signal from user
+
+ ⚠️ CRITICAL - "failed" OUTCOMES ARE ESSENTIAL:
+ • If user says you were wrong → outcome="failed"
+ • If memory you retrieved was outdated → outcome="failed"
+ • If user had to correct you → outcome="failed"
+ • If you gave advice that didn't help → outcome="failed"
+
+ Failed outcomes are how bad memories get deleted. Without them, wrong info persists forever.
+ Don't default to "worked" just to be optimistic. Wrong memories MUST be demoted.
+
+ SELECTIVE SCORING (optional):
+ If the scoring prompt shows "Memories surfaced:", you can specify which were actually relevant:
+ • related=["doc_id_1", "doc_id_2"] → only those get scored
+ • Omit related → all surfaced memories get scored (backwards compatible)
+ Unrelated memories get 0 (neutral) - they're not penalized, just skipped.""",
+                 inputSchema={
+                     "type": "object",
+                     "properties": {
+                         "outcome": {
+                             "type": "string",
+                             "enum": ["worked", "failed", "partial", "unknown"],
+                             "description": "How helpful was your response based on user's reaction"
+                         },
+                         "related": {
+                             "type": "array",
+                             "items": {"type": "string"},
+                             "description": "Optional: doc_ids of memories that were actually relevant. Omit to score all surfaced memories."
+                         }
+                     },
+                     "required": ["outcome"]
+                 }
+             ),
+             types.Tool(
+                 name="record_response",
+                 description="""Store a key takeaway when the transcript alone won't capture important learning.
+
+ OPTIONAL - Only use for significant exchanges:
+ • Major decisions made
+ • Complex solutions that worked
+ • User corrections (what you got wrong and why)
+ • Important context that would be lost
+
+ Most routine exchanges don't need this - the transcript is enough.
+
+ INITIAL SCORING (optional):
+ You can score the takeaway at creation time based on the current exchange:
+ • initial_score="worked" → starts at 0.7 (boosted)
+ • initial_score="failed" → starts at 0.2 (demoted, but still stored as "what not to do")
+ • Omit → starts at 0.5 (neutral default)""",
+                 inputSchema={
+                     "type": "object",
+                     "properties": {
+                         "key_takeaway": {
+                             "type": "string",
+                             "description": "1-2 sentence summary of the important learning"
+                         },
+                         "initial_score": {
+                             "type": "string",
+                             "enum": ["worked", "failed", "partial"],
+                             "description": "Optional: Score based on current exchange outcome. Omit for neutral 0.5 start."
+                         }
+                     },
+                     "required": ["key_takeaway"]
+                 }
+             ),
+             types.Tool(
+                 name="get_context_insights",
+                 description="""Search your memory before responding. Returns what you know about this user/topic.
+
+ WORKFLOW (follow these steps):
+ 1. get_context_insights(query) ← YOU ARE HERE
+ 2. Read the context returned
+ 3. search_memory() if you need more details
+ 4. Respond to user
+ 5. record_response() to complete
+
+ Returns: Known facts, past solutions, recommended collections, tool stats.
+ Fast lookup (5-10ms) - no embedding search, just pattern matching.
+
+ PROACTIVE MEMORY: If you learn something NEW about the user during the conversation
+ (name, preference, goal, project context), use add_to_memory_bank() to store it.
+ Don't wait to be asked - good assistants remember what matters.""",
+                 inputSchema={
+                     "type": "object",
+                     "properties": {
+                         "query": {
+                             "type": "string",
+                             "description": "Query/topic you're considering (use user's exact words)"
+                         }
+                     },
+                     "required": ["query"]
+                 }
+             ),
+         ]
+
+     @server.call_tool()
+     async def call_tool(name: str, arguments: dict) -> list[types.TextContent]:
+         """Handle MCP tool calls."""
+         await _initialize_memory()
+         session_id = _detect_mcp_client()
+
+         try:
+             if name == "search_memory":
+                 query = arguments.get("query", "")
+                 collections = arguments.get("collections")
+                 limit = arguments.get("limit", 5)
+                 metadata = arguments.get("metadata")
+
+                 try:
+                     results = await _memory.search(
+                         query=query,
+                         collections=collections,
+                         limit=limit,
+                         metadata_filters=metadata
+                     )
+                 except Exception as search_err:
+                     return [types.TextContent(
+                         type="text",
+                         text=f"Search error: {search_err}\n\nData path: {_memory.data_path}\nCollections: {list(_memory.collections.keys())}"
+                     )]
+
+                 # Cache doc_ids for outcome scoring (last call only)
+                 cached_doc_ids = [r.get("id") for r in results if r.get("id")]
+                 _mcp_search_cache[session_id] = {
+                     "doc_ids": cached_doc_ids,
+                     "query": query,
+                     "timestamp": datetime.now()
+                 }
+
+                 if not results:
+                     # Debug info
+                     coll_counts = {name: coll.collection.count() if coll.collection else 0 for name, coll in _memory.collections.items()}
+                     text = f"No results found for '{query}'.\n\nDebug: data_path={_memory.data_path}, collections={coll_counts}"
+                 else:
+                     text = f"Found {len(results)} result(s) for '{query}':\n\n"
+                     for i, r in enumerate(results[:limit], 1):
+                         meta = r.get("metadata", {})  # distinct name so the 'metadata' filter above isn't shadowed
+                         # Content can be in multiple places
+                         content = r.get("content") or meta.get("content") or meta.get("text") or r.get("text", "")
+                         collection = r.get("collection", "unknown")
+                         score = meta.get("score", 0.5)
+                         uses = meta.get("uses", 0)
+                         last_outcome = meta.get("last_outcome", "unknown")
+
+                         meta_line = f" (score:{score:.2f}, uses:{uses}, last:{last_outcome})" if collection in ["patterns", "history", "working"] else ""
+                         text += f"{i}. [{collection}]{meta_line} {content}\n\n"
+
+                 return [types.TextContent(type="text", text=text)]
+
+             elif name == "add_to_memory_bank":
+                 content = arguments.get("content")
+                 tags = arguments.get("tags", [])
+                 importance = arguments.get("importance", 0.7)
+                 confidence = arguments.get("confidence", 0.7)
+
+                 doc_id = await _memory.store_memory_bank(
+                     text=content,
+                     tags=tags,
+                     importance=importance,
+                     confidence=confidence
+                 )
+
+                 return [types.TextContent(
+                     type="text",
+                     text=f"Added to memory bank (ID: {doc_id})"
+                 )]
+
+             elif name == "update_memory":
+                 old_content = arguments.get("old_content", "")
+                 new_content = arguments.get("new_content", "")
+
+                 doc_id = await _memory.update_memory_bank(
+                     old_content=old_content,
+                     new_content=new_content
+                 )
+
+                 if doc_id:
+                     return [types.TextContent(
+                         type="text",
+                         text=f"Updated memory (ID: {doc_id})"
+                     )]
+                 else:
+                     return [types.TextContent(
+                         type="text",
+                         text="Memory not found for update"
+                     )]
+
+             elif name == "archive_memory":
+                 content = arguments.get("content", "")
+
+                 success = await _memory.archive_memory_bank(content)
+
+                 if success:
+                     return [types.TextContent(
+                         type="text",
+                         text="Memory archived successfully"
+                     )]
+                 else:
+                     return [types.TextContent(
+                         type="text",
+                         text="Memory not found for archiving"
+                     )]
+
+             elif name == "score_response":
+                 outcome = arguments.get("outcome", "unknown")
+                 related = arguments.get("related")  # Optional list of doc_ids to score
+
+                 # Score via FastAPI endpoint (has access to hook-injected doc_ids cache)
+                 scored_count = 0
+                 try:
+                     import httpx
+                     payload = {
+                         "conversation_id": session_id,
+                         "outcome": outcome
+                     }
+                     # Only include related if explicitly provided (backwards compatible)
+                     if related is not None:
+                         payload["related"] = related
+
+                     async with httpx.AsyncClient() as client:
+                         response = await client.post(
+                             "http://127.0.0.1:27182/api/record-outcome",
+                             json=payload,
+                             timeout=5.0
+                         )
+                         if response.status_code == 200:
+                             result = response.json()
+                             scored_count = result.get("documents_scored", 0)
+                 except Exception as e:
+                     logger.warning(f"Failed to call FastAPI record-outcome: {e}")
+                     # Fall back to MCP cache scoring
+                     if session_id in _mcp_search_cache:
+                         cached = _mcp_search_cache[session_id]
+                         doc_ids = cached.get("doc_ids", [])
+                         # Apply related filter if provided
+                         if related is not None:
+                             doc_ids = [d for d in doc_ids if d in related]
+                         if doc_ids:
+                             result = await _memory.record_outcome(doc_ids, outcome)
+                             scored_count = result.get("documents_updated", 0)
+                         del _mcp_search_cache[session_id]
+
+                 logger.info(f"Scored response: outcome={outcome}, related={related}, scored={scored_count}")
+                 return [types.TextContent(
+                     type="text",
+                     text=f"Scored (outcome={outcome}, {scored_count} memories updated)"
+                 )]
+
+             elif name == "record_response":
+                 key_takeaway = arguments.get("key_takeaway", "")
+                 initial_score = arguments.get("initial_score")  # Optional: worked, failed, partial
+
+                 if not key_takeaway:
+                     return [types.TextContent(
+                         type="text",
+                         text="Error: 'key_takeaway' is required"
+                     )]
+
+                 # Calculate starting score based on initial_score
+                 # Matches score deltas from ARCHITECTURE.md: worked +0.20, failed -0.30, partial +0.05
+                 starting_score = 0.5  # neutral default
+                 if initial_score == "worked":
+                     starting_score = 0.7  # 0.5 + 0.2
+                 elif initial_score == "failed":
+                     starting_score = 0.2  # 0.5 - 0.3
+                 elif initial_score == "partial":
+                     starting_score = 0.55  # 0.5 + 0.05
+
+                 # Store the takeaway in working memory
+                 doc_id = await _memory.store_working(
+                     content=f"Key takeaway: {key_takeaway}",
+                     conversation_id=session_id,
+                     metadata={
+                         "type": "key_takeaway",
+                         "timestamp": datetime.now().isoformat(),
+                         "initial_outcome": initial_score
+                     },
+                     initial_score=starting_score
+                 )
+                 logger.info(f"Recorded takeaway (score={starting_score}): {key_takeaway[:50]}...")
+                 return [types.TextContent(
+                     type="text",
+                     text=f"Recorded: {key_takeaway}"
+                 )]
+
+             elif name == "get_context_insights":
+                 query = arguments.get("query", "")
+
+                 if not query:
+                     return [types.TextContent(
+                         type="text",
+                         text="Error: 'query' is required"
+                     )]
+
+                 # Get context from memory system
+                 context = await _memory.get_context_for_injection(query)
+
+                 # Cache doc_ids for scoring (last call only)
+                 cached_doc_ids = context.get("doc_ids", [])
+                 if cached_doc_ids:
+                     _mcp_search_cache[session_id] = {
+                         "doc_ids": cached_doc_ids,
+                         "query": query,
+                         "source": "get_context_insights",
+                         "timestamp": datetime.now()
+                     }
+
+                 # Format response
+                 user_facts = context.get("user_facts", [])
+                 memories = context.get("relevant_memories", [])
+
+                 text = f"Known Context for '{query}':\n\n"
+
+                 if user_facts:
+                     text += "**Memory Bank:**\n"
+                     for fact in user_facts:
+                         text += f"• {fact.get('content', '')}\n"
+                     text += "\n"
+
+                 if memories:
+                     text += "**Relevant Memories:**\n"
+                     for mem in memories:
+                         coll = mem.get("collection", "unknown")
+                         content = mem.get("content") or mem.get("metadata", {}).get("content", "")
+                         score = mem.get("metadata", {}).get("score", 0.5)
+                         text += f"• [{coll}] (score:{score:.2f}) {content}\n"
+                     text += "\n"
+
+                 if not user_facts and not memories:
+                     text += "No relevant context found. This may be a new topic or first interaction.\n"
+
+                 text += f"\n_Cached {len(cached_doc_ids)} doc_ids for outcome scoring._"
+
+                 return [types.TextContent(type="text", text=text)]
+
+             else:
+                 return [types.TextContent(
+                     type="text",
+                     text=f"Unknown tool: {name}"
+                 )]
+
+         except Exception as e:
+             logger.error(f"MCP tool error ({name}): {e}")
+             return [types.TextContent(
+                 type="text",
+                 text=f"Error: {str(e)}"
+             )]
+
+     # Run the server
+     async def main():
+         async with stdio_server() as (read_stream, write_stream):
+             await server.run(read_stream, write_stream, server.create_initialization_options())
+
+     asyncio.run(main())
+
+
+ if __name__ == "__main__":
+     logging.basicConfig(level=logging.INFO)
+     run_mcp_server()
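
The module docstring above documents a four-step tool workflow (get_context_insights → search_memory → respond → record_response). As a minimal sketch (not part of the package) of what that looks like from the client side, the following assumes the MCP Python SDK's `StdioServerParameters`/`stdio_client`/`ClientSession` API and that `python -m roampal.mcp.server` reaches the `__main__` guard above; the query and takeaway strings are placeholders:

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def demo() -> None:
    # Launch the roampal MCP server the way an editor integration would.
    params = StdioServerParameters(command="python", args=["-m", "roampal.mcp.server"])
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Step 1: ask what is already known about this topic.
            ctx = await session.call_tool(
                "get_context_insights", {"query": "how do I deploy this project?"}
            )
            print(ctx.content)
            # Step 2 (optional): drill down when the context is not enough.
            hits = await session.call_tool(
                "search_memory", {"query": "how do I deploy this project?", "limit": 5}
            )
            print(hits.content)
            # Step 4: close the loop for a significant exchange.
            await session.call_tool(
                "record_response",
                {
                    "key_takeaway": "Deploys go through the staging pipeline first.",
                    "initial_score": "worked",
                },
            )


asyncio.run(demo())
```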
roampal/server/__init__.py ADDED
@@ -0,0 +1,7 @@
+ """
+ Roampal Server - FastAPI server for hooks and API
+ """
+
+ from .main import create_app, start_server
+
+ __all__ = ["create_app", "start_server"]
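
The starting scores hard-coded in the record_response handler above (0.7, 0.2, 0.55) are the neutral 0.5 plus the deltas its comment cites from ARCHITECTURE.md (worked +0.20, failed -0.30, partial +0.05). A minimal sketch of that arithmetic, assuming scores clamp to [0.0, 1.0] and that "unknown" leaves a score unchanged; the package's actual logic lives in scoring_service.py, which this diff excerpt does not show:

```python
# Outcome deltas cited in the source comment: worked +0.20, failed -0.30, partial +0.05.
# Treating "unknown" as a no-op and clamping to [0.0, 1.0] are assumptions of this sketch.
DELTAS = {"worked": 0.20, "failed": -0.30, "partial": 0.05, "unknown": 0.0}


def apply_outcome(score: float, outcome: str) -> float:
    """Apply an outcome delta to a memory score, clamped to [0.0, 1.0]."""
    return max(0.0, min(1.0, score + DELTAS.get(outcome, 0.0)))


for outcome in ("worked", "failed", "partial"):
    print(outcome, round(apply_outcome(0.5, outcome), 2))
# Prints worked 0.7, failed 0.2, partial 0.55: the same starting scores
# record_response assigns when initial_score is provided.
```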