nia-mcp-server 1.0.25__py3-none-any.whl → 1.0.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

nia_mcp_server/server.py CHANGED
@@ -72,59 +72,188 @@ async def ensure_api_client() -> NIAApiClient:
72
72
  raise ValueError("Failed to validate API key. Check logs for details.")
73
73
  return api_client
74
74
 
75
+ def _detect_resource_type(url: str) -> str:
76
+ """Detect if URL is a GitHub repository or documentation.
77
+
78
+ Args:
79
+ url: The URL to analyze
80
+
81
+ Returns:
82
+ "repository" if GitHub URL or repository pattern, "documentation" otherwise
83
+ """
84
+ import re
85
+ from urllib.parse import urlparse
86
+
87
+ try:
88
+ # First, check for repository-like patterns
89
+ # Pattern 1: owner/repo format (simple case with single slash)
90
+ if re.match(r'^[a-zA-Z0-9_-]+/[a-zA-Z0-9_.-]+$', url):
91
+ return "repository"
92
+
93
+ # Pattern 2: Git SSH format (git@github.com:owner/repo.git)
94
+ if url.startswith('git@'):
95
+ return "repository"
96
+
97
+ # Pattern 3: Git protocol (git://...)
98
+ if url.startswith('git://'):
99
+ return "repository"
100
+
101
+ # Pattern 4: Ends with .git
102
+ if url.endswith('.git'):
103
+ return "repository"
104
+
105
+ # Pattern 5: owner/repo/tree/branch or owner/repo/tree/branch/... format
106
+ if re.match(r'^[a-zA-Z0-9_-]+/[a-zA-Z0-9_.-]+/tree/.+', url):
107
+ return "repository"
108
+
109
+ # Parse as URL for domain-based detection
110
+ parsed = urlparse(url)
111
+ # Only treat as repository if it's actually the github.com domain
112
+ netloc = parsed.netloc.lower()
113
+ if netloc == "github.com" or netloc == "www.github.com":
114
+ return "repository"
115
+
116
+ return "documentation"
117
+ except Exception:
118
+ # Fallback to documentation if parsing fails
119
+ return "documentation"
120
+
75
121
  # Tools
76
122
 
77
123
  @mcp.tool()
78
- async def index_repository(
79
- repo_url: str,
80
- branch: Optional[str] = None
124
+ async def index(
125
+ url: str,
126
+ resource_type: Optional[str] = None,
127
+ branch: Optional[str] = None,
128
+ url_patterns: Optional[List[str]] = None,
129
+ exclude_patterns: Optional[List[str]] = None,
130
+ max_age: Optional[int] = None,
131
+ only_main_content: Optional[bool] = True,
132
+ wait_for: Optional[int] = None,
133
+ include_screenshot: Optional[bool] = None,
134
+ check_llms_txt: Optional[bool] = True,
135
+ llms_txt_strategy: Optional[str] = "prefer"
81
136
  ) -> List[TextContent]:
82
137
  """
83
- Index a GitHub repository for intelligent code search.
84
-
138
+ Universal indexing tool - intelligently indexes GitHub repositories or documentation.
139
+
140
+ Auto-detects resource type from URL:
141
+ - GitHub URLs (containing "github.com") → Repository indexing
142
+ - All other URLs → Documentation indexing
143
+
85
144
  Args:
86
- repo_url: GitHub repository URL (e.g., https://github.com/owner/repo or https://github.com/owner/repo/tree/branch)
145
+ url: GitHub repository URL or documentation site URL (required)
146
+ resource_type: Optional override - "repository" or "documentation" (auto-detected if not provided)
147
+
148
+ # Repository-specific parameters:
87
149
  branch: Branch to index (optional, defaults to main branch)
88
-
150
+
151
+ # Documentation-specific parameters:
152
+ url_patterns: Optional list of URL patterns to include in crawling
153
+ exclude_patterns: Optional list of URL patterns to exclude from crawling
154
+ max_age: Maximum age of cached content in days
155
+ only_main_content: Extract only main content (default: True)
156
+ wait_for: Time to wait for page load in milliseconds
157
+ include_screenshot: Include screenshots of pages
158
+ check_llms_txt: Check for llms.txt file (default: True)
159
+ llms_txt_strategy: Strategy for llms.txt - "prefer", "only", or "ignore" (default: "prefer")
160
+
89
161
  Returns:
90
162
  Status of the indexing operation
91
-
163
+
164
+ Examples:
165
+ # Index a GitHub repository (auto-detected)
166
+ index("https://github.com/owner/repo", branch="main")
167
+
168
+ # Index documentation (auto-detected)
169
+ index("https://docs.example.com", url_patterns=["*/api/*"])
170
+
171
+ # Manual override (if needed for edge cases)
172
+ index("https://github.io/docs", resource_type="documentation")
173
+
92
174
  Important:
93
- - When started indexing, prompt users to either use check_repository_status tool or go to app.trynia.ai to check the status.
175
+ - When indexing starts, use check_resource_status to monitor progress
176
+ - Repository identifier format: owner/repo or owner/repo/tree/branch
94
177
  """
95
178
  try:
96
179
  client = await ensure_api_client()
97
-
98
- # Start indexing
99
- logger.info(f"Starting to index repository: {repo_url}")
100
- result = await client.index_repository(repo_url, branch)
101
-
102
- repository = result.get("repository", repo_url)
103
- status = result.get("status", "unknown")
104
-
105
- if status == "completed":
106
- return [TextContent(
107
- type="text",
108
- text=f"✅ Repository already indexed: {repository}\n"
109
- f"Branch: {result.get('branch', 'main')}\n"
110
- f"You can now search this codebase!"
111
- )]
180
+
181
+ # Detect or validate resource type
182
+ if resource_type:
183
+ if resource_type not in ["repository", "documentation"]:
184
+ return [TextContent(
185
+ type="text",
186
+ text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository' or 'documentation'."
187
+ )]
188
+ detected_type = resource_type
112
189
  else:
113
- # Wait for indexing to complete
114
- return [TextContent(
115
- type="text",
116
- text=f"⏳ Indexing started for: {repository}\n"
117
- f"Branch: {branch or 'default'}\n"
118
- f"Status: {status}\n\n"
119
- f"Use `check_repository_status` to monitor progress."
120
- )]
121
-
190
+ detected_type = _detect_resource_type(url)
191
+
192
+ logger.info(f"Indexing {detected_type}: {url}")
193
+
194
+ # Route to appropriate indexing method
195
+ if detected_type == "repository":
196
+ # Index repository
197
+ result = await client.index_repository(url, branch)
198
+
199
+ repository = result.get("repository", url)
200
+ status = result.get("status", "unknown")
201
+
202
+ if status == "completed":
203
+ return [TextContent(
204
+ type="text",
205
+ text=f"✅ Repository already indexed: {repository}\n"
206
+ f"Branch: {result.get('branch', 'main')}\n"
207
+ f"You can now search this codebase!"
208
+ )]
209
+ else:
210
+ return [TextContent(
211
+ type="text",
212
+ text=f"⏳ Indexing started for: {repository}\n"
213
+ f"Branch: {branch or 'default'}\n"
214
+ f"Status: {status}\n\n"
215
+ f"Use `check_resource_status(\"repository\", \"{repository}\")` to monitor progress."
216
+ )]
217
+
218
+ else: # documentation
219
+ # Index documentation
220
+ result = await client.create_data_source(
221
+ url=url,
222
+ url_patterns=url_patterns,
223
+ exclude_patterns=exclude_patterns,
224
+ max_age=max_age,
225
+ only_main_content=only_main_content,
226
+ wait_for=wait_for,
227
+ include_screenshot=include_screenshot,
228
+ check_llms_txt=check_llms_txt,
229
+ llms_txt_strategy=llms_txt_strategy
230
+ )
231
+
232
+ source_id = result.get("id")
233
+ status = result.get("status", "unknown")
234
+
235
+ if status == "completed":
236
+ return [TextContent(
237
+ type="text",
238
+ text=f"✅ Documentation already indexed: {url}\n"
239
+ f"Source ID: {source_id}\n"
240
+ f"You can now search this documentation!"
241
+ )]
242
+ else:
243
+ return [TextContent(
244
+ type="text",
245
+ text=f"⏳ Documentation indexing started: {url}\n"
246
+ f"Source ID: {source_id}\n"
247
+ f"Status: {status}\n\n"
248
+ f"Use `check_resource_status(\"documentation\", \"{source_id}\")` to monitor progress."
249
+ )]
250
+
122
251
  except APIError as e:
123
- logger.error(f"API Error indexing repository: {e} (status_code={e.status_code}, detail={e.detail})")
252
+ logger.error(f"API Error indexing {detected_type}: {e} (status_code={e.status_code}, detail={e.detail})")
124
253
  if e.status_code == 403 or "free tier limit" in str(e).lower() or "indexing operations" in str(e).lower():
125
254
  if e.detail and "3 free indexing operations" in e.detail:
126
255
  return [TextContent(
127
- type="text",
256
+ type="text",
128
257
  text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
129
258
  )]
130
259
  else:
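
For reference, the detection rules added in this hunk can be summarized with a condensed, standalone sketch (illustration only, not the packaged `_detect_resource_type` implementation): owner/repo shorthand, `git@`/`git://` URLs, `.git` suffixes, `owner/repo/tree/branch` paths, and the github.com domain map to "repository"; everything else falls back to "documentation".

```python
# Condensed sketch of the URL-detection rules introduced above (illustration only).
import re
from urllib.parse import urlparse

def detect_resource_type(url: str) -> str:
    if re.match(r'^[a-zA-Z0-9_-]+/[a-zA-Z0-9_.-]+$', url):          # owner/repo shorthand
        return "repository"
    if url.startswith(('git@', 'git://')) or url.endswith('.git'):  # Git SSH/protocol or .git suffix
        return "repository"
    if re.match(r'^[a-zA-Z0-9_-]+/[a-zA-Z0-9_.-]+/tree/.+', url):   # owner/repo/tree/branch
        return "repository"
    netloc = urlparse(url).netloc.lower()                           # domain-based fallback
    return "repository" if netloc in ("github.com", "www.github.com") else "documentation"

assert detect_resource_type("facebook/react") == "repository"
assert detect_resource_type("git@github.com:owner/repo.git") == "repository"
assert detect_resource_type("https://github.com/owner/repo/tree/main") == "repository"
assert detect_resource_type("https://docs.example.com/guide") == "documentation"
assert detect_resource_type("https://owner.github.io/docs") == "documentation"  # GitHub Pages site
```

Ambiguous cases such as GitHub Pages documentation sites can still be forced with the explicit `resource_type` parameter, as shown in the `index` tool's docstring examples.
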
@@ -135,7 +264,7 @@ async def index_repository(
135
264
  else:
136
265
  return [TextContent(type="text", text=f"❌ {str(e)}")]
137
266
  except Exception as e:
138
- logger.error(f"Unexpected error indexing repository: {e}")
267
+ logger.error(f"Unexpected error indexing: {e}")
139
268
  error_msg = str(e)
140
269
  if "indexing operations" in error_msg.lower() or "lifetime limit" in error_msg.lower():
141
270
  return [TextContent(
@@ -144,9 +273,79 @@ async def index_repository(
144
273
  )]
145
274
  return [TextContent(
146
275
  type="text",
147
- text=f"❌ Error indexing repository: {error_msg}"
276
+ text=f"❌ Error indexing: {error_msg}"
148
277
  )]
149
278
 
279
+ # @mcp.tool()
280
+ # async def index_repository(
281
+ # repo_url: str,
282
+ # branch: Optional[str] = None
283
+ # ) -> List[TextContent]:
284
+ # """
285
+ # DEPRECATED: Use the unified `index` tool instead.
286
+ #
287
+ # Args:
288
+ # repo_url: GitHub repository URL (e.g., https://github.com/owner/repo or https://github.com/owner/repo/tree/branch)
289
+ # branch: Branch to index (optional, defaults to main branch)
290
+ #
291
+ # Important:
292
+ # - When started indexing, prompt users to either use check_repository_status tool or go to app.trynia.ai to check the status.
293
+ # """
294
+ # try:
295
+ # client = await ensure_api_client()
296
+ #
297
+ # # Start indexing
298
+ # logger.info(f"Starting to index repository: {repo_url}")
299
+ # result = await client.index_repository(repo_url, branch)
300
+ #
301
+ # repository = result.get("repository", repo_url)
302
+ # status = result.get("status", "unknown")
303
+ #
304
+ # if status == "completed":
305
+ # return [TextContent(
306
+ # type="text",
307
+ # text=f"✅ Repository already indexed: {repository}\n"
308
+ # f"Branch: {result.get('branch', 'main')}\n"
309
+ # f"You can now search this codebase!"
310
+ # )]
311
+ # else:
312
+ # # Wait for indexing to complete
313
+ # return [TextContent(
314
+ # type="text",
315
+ # text=f"⏳ Indexing started for: {repository}\n"
316
+ # f"Branch: {branch or 'default'}\n"
317
+ # f"Status: {status}\n\n"
318
+ # f"Use `check_repository_status` to monitor progress."
319
+ # )]
320
+ #
321
+ # except APIError as e:
322
+ # logger.error(f"API Error indexing repository: {e} (status_code={e.status_code}, detail={e.detail})")
323
+ # if e.status_code == 403 or "free tier limit" in str(e).lower() or "indexing operations" in str(e).lower():
324
+ # if e.detail and "3 free indexing operations" in e.detail:
325
+ # return [TextContent(
326
+ # type="text",
327
+ # text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
328
+ # )]
329
+ # else:
330
+ # return [TextContent(
331
+ # type="text",
332
+ # text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
333
+ # )]
334
+ # else:
335
+ # return [TextContent(type="text", text=f"❌ {str(e)}")]
336
+ # except Exception as e:
337
+ # logger.error(f"Unexpected error indexing repository: {e}")
338
+ # error_msg = str(e)
339
+ # if "indexing operations" in error_msg.lower() or "lifetime limit" in error_msg.lower():
340
+ # return [TextContent(
341
+ # type="text",
342
+ # text=f"❌ {error_msg}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
343
+ # )]
344
+ # return [TextContent(
345
+ # type="text",
346
+ # text=f"❌ Error indexing repository: {error_msg}"
347
+ # )]
348
+
150
349
  @mcp.tool()
151
350
  async def search_codebase(
152
351
  query: str,
@@ -157,31 +356,12 @@ async def search_codebase(
157
356
  Search indexed repositories using natural language.
158
357
 
159
358
  Args:
160
- query: Natural language search query. Don't just use keywords or unstrctured query, make a comprehensive question to get the best results possible.
161
- repositories: List of repositories to search (owner/repo or owner/repo/tree/branch if indexed differently before).
162
- - "owner/repo" - Search entire repository (e.g., "facebook/react")
163
- - "owner/repo/tree/branch/folder" - Search specific folder indexed separately
164
- (e.g., "PostHog/posthog/tree/master/docs")
165
- Use the EXACT format shown in list_repositories output for folder-indexed repos.
166
- If not specified, searches all indexed repos.
167
- include_sources: Whether to include source code in results
359
+ query: Natural language search query.
360
+ repositories: List of repositories to search (owner/repo or owner/repo/tree/branch).
361
+ include_sources: default to True.
168
362
 
169
363
  Returns:
170
364
  Search results with relevant code snippets and explanations
171
-
172
- Examples:
173
- # Search all indexed repositories
174
- search_codebase("How does authentication work?")
175
-
176
- # Search specific repository
177
- search_codebase("How to create custom hooks?", ["facebook/react"])
178
-
179
- # Search folder-indexed repository (use exact format from list_repositories)
180
- search_codebase("What is Flox?", ["PostHog/posthog/tree/master/docs"])
181
-
182
- Important:
183
- - If you want to search a specific folder, use the EXACT repository path shown above
184
- - Example: `search_codebase(\"query\", [\"owner/repo/tree/branch/folder\"])`
185
365
  """
186
366
  try:
187
367
  client = await ensure_api_client()
@@ -324,6 +504,164 @@ async def search_codebase(
324
504
  text=f"❌ Error searching codebase: {error_msg}"
325
505
  )]
326
506
 
507
+ @mcp.tool()
508
+ async def regex_search(
509
+ repositories: List[str],
510
+ query: str,
511
+ pattern: Optional[str] = None,
512
+ file_extensions: Optional[List[str]] = None,
513
+ languages: Optional[List[str]] = None,
514
+ max_results: int = 50,
515
+ include_context: bool = True,
516
+ context_lines: int = 3
517
+ ) -> List[TextContent]:
518
+ """
519
+ Perform regex pattern search over indexed repository source code.
520
+
521
+ Args:
522
+ repositories: List of repositories to search (owner/repo format)
523
+ query: Natural language query or regex pattern (e.g., "function handleSubmit", "class UserController", "/async\\s+function/")
524
+ pattern: Optional explicit regex pattern (overrides automatic extraction from query)
525
+ file_extensions: File extensions to filter (e.g., [".js", ".tsx", ".py"])
526
+ languages: Programming languages to filter (e.g., ["python", "javascript", "typescript"])
527
+ max_results: Maximum number of results to return (default: 50)
528
+ include_context: Include surrounding context lines (default: True)
529
+ context_lines: Number of context lines before/after match (default: 3)
530
+
531
+ Returns:
532
+ Regex search results with exact matches, file locations, and context
533
+
534
+ Examples:
535
+ - Natural language: `regex_search(["owner/repo"], "function handleSubmit")`
536
+ - Direct regex: `regex_search(["owner/repo"], "/async\\s+function\\s+\\w+/")`
537
+ - With filters: `regex_search(["owner/repo"], "class Controller", file_extensions=[".py"])`
538
+ """
539
+ try:
540
+ client = await ensure_api_client()
541
+
542
+ # Require explicit repository selection
543
+ if not repositories:
544
+ return [TextContent(
545
+ type="text",
546
+ text="🔍 **Please specify which repositories to search:**\n\n"
547
+ "1. Use `list_repositories` to see available repositories\n"
548
+ "2. Then call `regex_search([\"owner/repo\"], \"pattern\")`\n\n"
549
+ "**Examples:**\n"
550
+ "```python\n"
551
+ "# Natural language pattern\n"
552
+ "regex_search([\"facebook/react\"], \"function useState\")\n\n"
553
+ "# Direct regex pattern\n"
554
+ "regex_search([\"django/django\"], \"/class\\s+\\w+Admin/\")\n\n"
555
+ "# With file filters\n"
556
+ "regex_search([\"owner/repo\"], \"import React\", file_extensions=[\".tsx\"])\n"
557
+ "```"
558
+ )]
559
+
560
+ logger.info(f"Performing regex search in {len(repositories)} repositories for query: {query}")
561
+
562
+ # Call the regex search API
563
+ result = await client.regex_search(
564
+ repositories=repositories,
565
+ query=query,
566
+ pattern=pattern,
567
+ file_extensions=file_extensions,
568
+ languages=languages,
569
+ max_results=max_results,
570
+ include_context=include_context,
571
+ context_lines=context_lines
572
+ )
573
+
574
+ # Check for errors
575
+ if not result.get("success"):
576
+ error_msg = result.get("error", "Unknown error")
577
+ return [TextContent(
578
+ type="text",
579
+ text=f"❌ **Regex Search Error:** {error_msg}"
580
+ )]
581
+
582
+ # Format the results
583
+ response_parts = []
584
+
585
+ # Add search summary
586
+ total_matches = result.get("total_matches", 0)
587
+ total_files = result.get("total_files", 0)
588
+ pattern_used = result.get("pattern", query)
589
+
590
+ summary = f"🔍 **Regex Search Results**\n\n"
591
+ summary += f"**Query:** `{query}`\n"
592
+ if pattern and pattern != query:
593
+ summary += f"**Pattern Used:** `{pattern_used}`\n"
594
+ summary += f"**Matches Found:** {total_matches} matches in {total_files} files\n"
595
+
596
+ if file_extensions:
597
+ summary += f"**File Extensions:** {', '.join(file_extensions)}\n"
598
+ if languages:
599
+ summary += f"**Languages:** {', '.join(languages)}\n"
600
+
601
+ response_parts.append(summary)
602
+ response_parts.append("\n---\n")
603
+
604
+ # Add the actual results
605
+ results = result.get("results", [])
606
+
607
+ if not results:
608
+ response_parts.append("\n📭 No matches found for the given pattern.")
609
+ else:
610
+ # Group results by file
611
+ file_matches = {}
612
+ for match in results:
613
+ file_path = match.get("file_path", "Unknown")
614
+ if file_path not in file_matches:
615
+ file_matches[file_path] = []
616
+ file_matches[file_path].append(match)
617
+
618
+ # Format each file's matches
619
+ for file_path, matches in file_matches.items():
620
+ response_parts.append(f"\n### 📄 `{file_path}`\n")
621
+
622
+ for match in matches:
623
+ line_number = match.get("line_number", 0)
624
+ matched_text = match.get("matched_text", "")
625
+ context = match.get("context", "")
626
+
627
+ response_parts.append(f"\n**Line {line_number}:** `{matched_text}`\n")
628
+
629
+ if include_context and context:
630
+ # Format context with line numbers
631
+ context_start = match.get("context_start_line", line_number)
632
+ response_parts.append("\n```\n")
633
+ context_lines_list = context.split('\n')
634
+ for i, line in enumerate(context_lines_list):
635
+ current_line_num = context_start + i
636
+ marker = ">" if current_line_num == line_number else " "
637
+ response_parts.append(f"{marker}{current_line_num:4d}: {line}\n")
638
+ response_parts.append("```\n")
639
+
640
+ # Add search hints if available
641
+ search_hints = result.get("search_hints", {})
642
+ if search_hints and search_hints.get("is_regex"):
643
+ response_parts.append("\n---\n")
644
+ response_parts.append("💡 **Search Hints:**\n")
645
+ if search_hints.get("case_sensitive"):
646
+ response_parts.append("- Case-sensitive search enabled\n")
647
+ if search_hints.get("whole_word"):
648
+ response_parts.append("- Whole word matching enabled\n")
649
+
650
+ # Combine all parts into final response
651
+ full_response = "".join(response_parts)
652
+
653
+ return [TextContent(
654
+ type="text",
655
+ text=full_response
656
+ )]
657
+
658
+ except Exception as e:
659
+ logger.error(f"Regex search error: {e}", exc_info=True)
660
+ return [TextContent(
661
+ type="text",
662
+ text=f"❌ **Regex Search Error:** {str(e)}"
663
+ )]
664
+
327
665
  @mcp.tool()
328
666
  async def search_documentation(
329
667
  query: str,
@@ -336,18 +674,11 @@ async def search_documentation(
336
674
  Args:
337
675
  query: Natural language search query. Don't just use keywords or unstrctured query, make a comprehensive question to get the best results possible.
338
676
  sources: List of documentation identifiers to search. Preferred format is UUID, but also supports:
339
- - Source UUIDs (e.g., "550e8400-e29b-41d4-a716-446655440000") - RECOMMENDED
340
- - Display names (e.g., "Vercel AI SDK - Core")
341
- - URLs (e.g., "https://sdk.vercel.ai/docs")
342
- include_sources: Whether to include source references in results
343
-
344
- Returns:
345
- Search results with relevant documentation excerpts
346
-
677
+ - Source UUIDs - RECOMMENDED
678
+ - Display names
679
+ - URLs
347
680
  Important:
348
681
  - UUIDs are the preferred identifier format for best performance
349
- - Use `list_documentation` tool to see available sources and their UUIDs
350
- - Display names and URLs are also supported for convenience
351
682
  """
352
683
  try:
353
684
  client = await ensure_api_client()
@@ -474,9 +805,7 @@ async def search_documentation(
474
805
 
475
806
  # @mcp.tool()
476
807
  # async def list_repositories() -> List[TextContent]:
477
- """
478
- List all indexed repositories.
479
-
808
+ """
480
809
  Returns:
481
810
  List of indexed repositories with their status
482
811
  """
@@ -563,9 +892,6 @@ async def search_documentation(
563
892
 
564
893
  Args:
565
894
  repository: Repository in owner/repo format
566
-
567
- Returns:
568
- Current status of the repository
569
895
  """
570
896
  try:
571
897
  client = await ensure_api_client()
@@ -619,97 +945,80 @@ async def search_documentation(
619
945
  text=f"❌ Error checking repository status: {str(e)}"
620
946
  )]
621
947
 
622
- @mcp.tool()
623
- async def index_documentation(
624
- url: str,
625
- url_patterns: Optional[List[str]] = None,
626
- exclude_patterns: Optional[List[str]] = None,
627
- max_age: Optional[int] = None,
628
- only_main_content: Optional[bool] = True,
629
- wait_for: Optional[int] = None,
630
- include_screenshot: Optional[bool] = None,
631
- check_llms_txt: Optional[bool] = True,
632
- llms_txt_strategy: Optional[str] = "prefer"
633
- ) -> List[TextContent]:
634
- """
635
- Index documentation or website for intelligent search.
636
-
637
- Args:
638
- url: URL of the documentation site to index
639
- url_patterns: Optional list of URL patterns to include in crawling (e.g., ["/docs/*", "/guide/*"])
640
- exclude_patterns: Optional list of URL patterns to exclude from crawling (e.g., ["/blog/*", "/changelog/*"])
641
- max_age: Maximum age of cached content in seconds (for fast scraping mode)
642
- only_main_content: Extract only main content (removes navigation, ads, etc.)
643
- wait_for: Time to wait for page to load in milliseconds (defaults to backend setting)
644
- include_screenshot: Whether to capture full page screenshots (defaults to backend setting)
645
- check_llms_txt: Check for llms.txt file for curated documentation URLs (default: True)
646
- llms_txt_strategy: How to use llms.txt if found:
647
- - "prefer": Start with llms.txt URLs, then crawl additional pages if under limit
648
- - "only": Only index URLs listed in llms.txt
649
- - "ignore": Skip llms.txt check (traditional behavior)
650
-
651
- Returns:
652
- Status of the indexing operation
653
-
654
- Important:
655
- - When started indexing, prompt users to either use check_documentation_status tool or go to app.trynia.ai to check the status.
656
- - By default, crawls the entire domain (up to 10,000 pages)
657
- - Use exclude_patterns to filter out unwanted sections like blogs, changelogs, etc.
658
- """
659
- try:
660
- client = await ensure_api_client()
661
-
662
- # Create and start indexing
663
- logger.info(f"Starting to index documentation: {url}")
664
- result = await client.create_data_source(
665
- url=url,
666
- url_patterns=url_patterns,
667
- exclude_patterns=exclude_patterns,
668
- max_age=max_age,
669
- only_main_content=only_main_content,
670
- wait_for=wait_for,
671
- include_screenshot=include_screenshot,
672
- check_llms_txt=check_llms_txt,
673
- llms_txt_strategy=llms_txt_strategy
674
- )
675
-
676
- source_id = result.get("id")
677
- status = result.get("status", "unknown")
678
-
679
- if status == "completed":
680
- return [TextContent(
681
- type="text",
682
- text=f" Documentation already indexed: {url}\n"
683
- f"Source ID: {source_id}\n"
684
- f"You can now search this documentation!"
685
- )]
686
- else:
687
- return [TextContent(
688
- type="text",
689
- text=f"⏳ Documentation indexing started: {url}\n"
690
- f"Source ID: {source_id}\n"
691
- f"Status: {status}\n\n"
692
- f"Use `check_documentation_status` to monitor progress."
693
- )]
694
-
695
- except APIError as e:
696
- logger.error(f"API Error indexing documentation: {e}")
697
- error_msg = f"❌ {str(e)}"
698
- if e.status_code == 403 and "lifetime limit" in str(e).lower():
699
- error_msg += "\n\n💡 Tip: You've reached the free tier limit of 3 indexing operations. Upgrade to Pro for unlimited access."
700
- return [TextContent(type="text", text=error_msg)]
701
- except Exception as e:
702
- logger.error(f"Error indexing documentation: {e}")
703
- return [TextContent(
704
- type="text",
705
- text=f"❌ Error indexing documentation: {str(e)}"
706
- )]
948
+ # @mcp.tool()
949
+ # async def index_documentation(
950
+ # url: str,
951
+ # url_patterns: Optional[List[str]] = None,
952
+ # exclude_patterns: Optional[List[str]] = None,
953
+ # max_age: Optional[int] = None,
954
+ # only_main_content: Optional[bool] = True,
955
+ # wait_for: Optional[int] = None,
956
+ # include_screenshot: Optional[bool] = None,
957
+ # check_llms_txt: Optional[bool] = True,
958
+ # llms_txt_strategy: Optional[str] = "prefer"
959
+ # ) -> List[TextContent]:
960
+ # """
961
+ # DEPRECATED: Use the unified `index` tool instead.
962
+ #
963
+ # Index documentation or website for intelligent search.
964
+ #
965
+ # Args:
966
+ # url: URL of the documentation site to index
967
+ # url_patterns: Optional list of URL patterns to include in crawling
968
+ # exclude_patterns: Optional list of URL patterns to exclude from crawling
969
+ # """
970
+ # try:
971
+ # client = await ensure_api_client()
972
+ #
973
+ # # Create and start indexing
974
+ # logger.info(f"Starting to index documentation: {url}")
975
+ # result = await client.create_data_source(
976
+ # url=url,
977
+ # url_patterns=url_patterns,
978
+ # exclude_patterns=exclude_patterns,
979
+ # max_age=max_age,
980
+ # only_main_content=only_main_content,
981
+ # wait_for=wait_for,
982
+ # include_screenshot=include_screenshot,
983
+ # check_llms_txt=check_llms_txt,
984
+ # llms_txt_strategy=llms_txt_strategy
985
+ # )
986
+ #
987
+ # source_id = result.get("id")
988
+ # status = result.get("status", "unknown")
989
+ #
990
+ # if status == "completed":
991
+ # return [TextContent(
992
+ # type="text",
993
+ # text=f"✅ Documentation already indexed: {url}\n"
994
+ # f"Source ID: {source_id}\n"
995
+ # f"You can now search this documentation!"
996
+ # )]
997
+ # else:
998
+ # return [TextContent(
999
+ # type="text",
1000
+ # text=f"⏳ Documentation indexing started: {url}\n"
1001
+ # f"Source ID: {source_id}\n"
1002
+ # f"Status: {status}\n\n"
1003
+ # f"Use `check_documentation_status` to monitor progress."
1004
+ # )]
1005
+ #
1006
+ # except APIError as e:
1007
+ # logger.error(f"API Error indexing documentation: {e}")
1008
+ # error_msg = f" {str(e)}"
1009
+ # if e.status_code == 403 and "lifetime limit" in str(e).lower():
1010
+ # error_msg += "\n\n💡 Tip: You've reached the free tier limit of 3 indexing operations. Upgrade to Pro for unlimited access."
1011
+ # return [TextContent(type="text", text=error_msg)]
1012
+ # except Exception as e:
1013
+ # logger.error(f"Error indexing documentation: {e}")
1014
+ # return [TextContent(
1015
+ # type="text",
1016
+ # text=f" Error indexing documentation: {str(e)}"
1017
+ # )]
707
1018
 
708
1019
  # @mcp.tool()
709
1020
  # async def list_documentation() -> List[TextContent]:
710
1021
  """
711
- List all indexed documentation sources.
712
-
713
1022
  Returns:
714
1023
  List of indexed documentation with their status
715
1024
  """
@@ -767,8 +1076,6 @@ async def index_documentation(
767
1076
  # @mcp.tool()
768
1077
  # async def check_documentation_status(source_id: str) -> List[TextContent]:
769
1078
  """
770
- Check the indexing status of a documentation source.
771
-
772
1079
  Args:
773
1080
  source_id: Documentation source ID
774
1081
 
@@ -833,98 +1140,458 @@ async def index_documentation(
833
1140
  # Combined Resource Management Tools
834
1141
 
835
1142
  @mcp.tool()
836
- async def rename_resource(
837
- resource_type: str,
838
- identifier: str,
839
- new_name: str
1143
+ async def manage_resource(
1144
+ action: str,
1145
+ resource_type: Optional[str] = None,
1146
+ identifier: Optional[str] = None,
1147
+ new_name: Optional[str] = None
840
1148
  ) -> List[TextContent]:
841
1149
  """
842
- Rename a resource (repository or documentation) for better organization.
1150
+ Unified resource management tool for repositories and documentation.
843
1151
 
844
1152
  Args:
845
- resource_type: Type of resource - "repository" or "documentation"
846
- identifier:
847
- - For repository: Repository in owner/repo format (e.g., "facebook/react")
848
- - For documentation: UUID preferred, also supports display name or URL (e.g., "550e8400-e29b-41d4-a716-446655440000", "Vercel AI SDK - Core", or "https://docs.trynia.ai/")
849
- new_name: New display name for the resource (1-100 characters)
850
-
851
- Returns:
852
- Confirmation of rename operation
1153
+ action: Action to perform - "list", "status", "rename", or "delete"
1154
+ resource_type: Type of resource - "repository" or "documentation" (required for status/rename/delete, optional for list)
1155
+ identifier: Resource identifier (required for status/rename/delete):
1156
+ - For repos: Repository in owner/repo format (e.g., "facebook/react")
1157
+ - For docs: UUID preferred, also supports display name or URL
1158
+ new_name: New display name (required only for rename action, 1-100 characters)
853
1159
 
854
1160
  Examples:
855
- - rename_resource("repository", "facebook/react", "React Framework")
856
- - rename_resource("documentation", "550e8400-e29b-41d4-a716-446655440000", "Python Official Docs")
857
- - rename_resource("documentation", "https://docs.trynia.ai/", "NIA Documentation")
1161
+ # List all resources
1162
+ manage_resource(action="list")
1163
+
1164
+ # List only repositories
1165
+ manage_resource(action="list", resource_type="repository")
1166
+
1167
+ # Check status
1168
+ manage_resource(action="status", resource_type="repository", identifier="owner/repo")
1169
+
1170
+ # Rename resource
1171
+ manage_resource(action="rename", resource_type="repository", identifier="owner/repo", new_name="My Project")
1172
+
1173
+ # Delete resource
1174
+ manage_resource(action="delete", resource_type="documentation", identifier="uuid-here")
858
1175
  """
859
1176
  try:
860
- # Validate resource type
861
- if resource_type not in ["repository", "documentation"]:
1177
+ # Validate action
1178
+ valid_actions = ["list", "status", "rename", "delete"]
1179
+ if action not in valid_actions:
862
1180
  return [TextContent(
863
1181
  type="text",
864
- text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository' or 'documentation'."
1182
+ text=f"❌ Invalid action: '{action}'. Must be one of: {', '.join(valid_actions)}"
865
1183
  )]
866
1184
 
867
- # Validate name length
868
- if not new_name or len(new_name) > 100:
1185
+ # Validate resource_type when provided
1186
+ if resource_type and resource_type not in ["repository", "documentation"]:
869
1187
  return [TextContent(
870
1188
  type="text",
871
- text="❌ Display name must be between 1 and 100 characters."
1189
+ text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository' or 'documentation'."
872
1190
  )]
873
1191
 
1192
+ # Validate required parameters based on action
1193
+ if action in ["status", "rename", "delete"]:
1194
+ if not resource_type:
1195
+ return [TextContent(
1196
+ type="text",
1197
+ text=f"❌ resource_type is required for action '{action}'"
1198
+ )]
1199
+ if not identifier:
1200
+ return [TextContent(
1201
+ type="text",
1202
+ text=f"❌ identifier is required for action '{action}'"
1203
+ )]
1204
+
1205
+ if action == "rename":
1206
+ if not new_name:
1207
+ return [TextContent(
1208
+ type="text",
1209
+ text="❌ new_name is required for rename action"
1210
+ )]
1211
+ # Validate name length
1212
+ if len(new_name) > 100:
1213
+ return [TextContent(
1214
+ type="text",
1215
+ text="❌ Display name must be between 1 and 100 characters."
1216
+ )]
1217
+
874
1218
  client = await ensure_api_client()
875
1219
 
876
- if resource_type == "repository":
877
- result = await client.rename_repository(identifier, new_name)
878
- resource_desc = f"repository '{identifier}'"
879
- else: # documentation
880
- result = await client.rename_data_source(identifier, new_name)
881
- resource_desc = f"documentation source"
1220
+ # ===== LIST ACTION =====
1221
+ if action == "list":
1222
+ # Validate resource type if provided
1223
+ if resource_type and resource_type not in ["repository", "documentation"]:
1224
+ return [TextContent(
1225
+ type="text",
1226
+ text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository', 'documentation', or None for all."
1227
+ )]
882
1228
 
883
- if result.get("success"):
1229
+ lines = []
1230
+
1231
+ # Determine what to list
1232
+ list_repos = resource_type in [None, "repository"]
1233
+ list_docs = resource_type in [None, "documentation"]
1234
+
1235
+ if list_repos:
1236
+ repositories = await client.list_repositories()
1237
+
1238
+ if repositories:
1239
+ lines.append("# Indexed Repositories\n")
1240
+ for repo in repositories:
1241
+ status_icon = "✅" if repo.get("status") == "completed" else "⏳"
1242
+
1243
+ # Show display name if available, otherwise show repository
1244
+ display_name = repo.get("display_name")
1245
+ repo_name = repo['repository']
1246
+
1247
+ if display_name:
1248
+ lines.append(f"\n## {status_icon} {display_name}")
1249
+ lines.append(f"- **Repository:** {repo_name}")
1250
+ else:
1251
+ lines.append(f"\n## {status_icon} {repo_name}")
1252
+
1253
+ lines.append(f"- **Branch:** {repo.get('branch', 'main')}")
1254
+ lines.append(f"- **Status:** {repo.get('status', 'unknown')}")
1255
+ if repo.get("indexed_at"):
1256
+ lines.append(f"- **Indexed:** {repo['indexed_at']}")
1257
+ if repo.get("error"):
1258
+ lines.append(f"- **Error:** {repo['error']}")
1259
+
1260
+ # Add usage hint for completed repositories
1261
+ if repo.get("status") == "completed":
1262
+ lines.append(f"- **Usage:** `search_codebase(query, [\"{repo_name}\"])`")
1263
+ elif resource_type == "repository":
1264
+ lines.append("No indexed repositories found.\n\n")
1265
+ lines.append("Get started by indexing a repository:\n")
1266
+ lines.append("Use `index` with a GitHub URL.")
1267
+
1268
+ if list_docs:
1269
+ sources = await client.list_data_sources()
1270
+
1271
+ if sources:
1272
+ if lines: # Add separator if we already have repositories
1273
+ lines.append("\n---\n")
1274
+ lines.append("# Indexed Documentation\n")
1275
+
1276
+ for source in sources:
1277
+ status_icon = "✅" if source.get("status") == "completed" else "⏳"
1278
+
1279
+ # Show display name if available, otherwise show URL
1280
+ display_name = source.get("display_name")
1281
+ url = source.get('url', 'Unknown URL')
1282
+
1283
+ if display_name:
1284
+ lines.append(f"\n## {status_icon} {display_name}")
1285
+ lines.append(f"- **URL:** {url}")
1286
+ else:
1287
+ lines.append(f"\n## {status_icon} {url}")
1288
+
1289
+ lines.append(f"- **ID:** {source['id']}")
1290
+ lines.append(f"- **Status:** {source.get('status', 'unknown')}")
1291
+ lines.append(f"- **Type:** {source.get('source_type', 'web')}")
1292
+ if source.get("page_count", 0) > 0:
1293
+ lines.append(f"- **Pages:** {source['page_count']}")
1294
+ if source.get("created_at"):
1295
+ lines.append(f"- **Created:** {source['created_at']}")
1296
+ elif resource_type == "documentation":
1297
+ lines.append("No indexed documentation found.\n\n")
1298
+ lines.append("Get started by indexing documentation:\n")
1299
+ lines.append("Use `index` with a URL.")
1300
+
1301
+ if not lines:
1302
+ lines.append("No indexed resources found.\n\n")
1303
+ lines.append("Get started by indexing:\n")
1304
+ lines.append("- Use `index` for GitHub repos or URLs\n")
1305
+
1306
+ return [TextContent(type="text", text="\n".join(lines))]
1307
+
1308
+ # ===== STATUS ACTION =====
1309
+ elif action == "status":
1310
+ if resource_type == "repository":
1311
+ status = await client.get_repository_status(identifier)
1312
+ if not status:
1313
+ return [TextContent(
1314
+ type="text",
1315
+ text=f"❌ Repository '{identifier}' not found."
1316
+ )]
1317
+ title = f"Repository Status: {identifier}"
1318
+ status_key = "status"
1319
+ else: # documentation
1320
+ status = await client.get_data_source_status(identifier)
1321
+ if not status:
1322
+ return [TextContent(
1323
+ type="text",
1324
+ text=f"❌ Documentation source '{identifier}' not found."
1325
+ )]
1326
+ title = f"Documentation Status: {status.get('url', 'Unknown URL')}"
1327
+ status_key = "status"
1328
+
1329
+ # Format status with appropriate icon
1330
+ status_text = status.get(status_key, "unknown")
1331
+ status_icon = {
1332
+ "completed": "✅",
1333
+ "indexing": "⏳",
1334
+ "processing": "⏳",
1335
+ "failed": "❌",
1336
+ "pending": "🔄",
1337
+ "error": "❌"
1338
+ }.get(status_text, "❓")
1339
+
1340
+ lines = [
1341
+ f"# {title}\n",
1342
+ f"{status_icon} **Status:** {status_text}"
1343
+ ]
1344
+
1345
+ # Add resource-specific fields
1346
+ if resource_type == "repository":
1347
+ lines.append(f"**Branch:** {status.get('branch', 'main')}")
1348
+ if status.get("progress"):
1349
+ progress = status["progress"]
1350
+ if isinstance(progress, dict):
1351
+ lines.append(f"**Progress:** {progress.get('percentage', 0)}%")
1352
+ if progress.get("stage"):
1353
+ lines.append(f"**Stage:** {progress['stage']}")
1354
+ else: # documentation
1355
+ lines.append(f"**Source ID:** {identifier}")
1356
+ if status.get("page_count", 0) > 0:
1357
+ lines.append(f"**Pages Indexed:** {status['page_count']}")
1358
+ if status.get("details"):
1359
+ details = status["details"]
1360
+ if details.get("progress"):
1361
+ lines.append(f"**Progress:** {details['progress']}%")
1362
+ if details.get("stage"):
1363
+ lines.append(f"**Stage:** {details['stage']}")
1364
+
1365
+ # Common fields
1366
+ if status.get("indexed_at"):
1367
+ lines.append(f"**Indexed:** {status['indexed_at']}")
1368
+ elif status.get("created_at"):
1369
+ lines.append(f"**Created:** {status['created_at']}")
1370
+
1371
+ if status.get("error"):
1372
+ lines.append(f"**Error:** {status['error']}")
1373
+
1374
+ return [TextContent(type="text", text="\n".join(lines))]
1375
+
1376
+ # ===== RENAME ACTION =====
1377
+ elif action == "rename":
1378
+ if resource_type == "repository":
1379
+ result = await client.rename_repository(identifier, new_name)
1380
+ resource_desc = f"repository '{identifier}'"
1381
+ else: # documentation
1382
+ result = await client.rename_data_source(identifier, new_name)
1383
+ resource_desc = f"documentation source"
1384
+
1385
+ if result.get("success"):
1386
+ return [TextContent(
1387
+ type="text",
1388
+ text=f"✅ Successfully renamed {resource_desc} to '{new_name}'"
1389
+ )]
1390
+ else:
1391
+ return [TextContent(
1392
+ type="text",
1393
+ text=f"❌ Failed to rename {resource_type}: {result.get('message', 'Unknown error')}"
1394
+ )]
1395
+
1396
+ # ===== DELETE ACTION =====
1397
+ elif action == "delete":
1398
+ if resource_type == "repository":
1399
+ success = await client.delete_repository(identifier)
1400
+ resource_desc = f"repository: {identifier}"
1401
+ else: # documentation
1402
+ success = await client.delete_data_source(identifier)
1403
+ resource_desc = f"documentation source: {identifier}"
1404
+
1405
+ if success:
1406
+ return [TextContent(
1407
+ type="text",
1408
+ text=f"✅ Successfully deleted {resource_desc}"
1409
+ )]
1410
+ else:
1411
+ return [TextContent(
1412
+ type="text",
1413
+ text=f"❌ Failed to delete {resource_desc}"
1414
+ )]
1415
+
1416
+ except APIError as e:
1417
+ logger.error(f"API Error in manage_resource ({action}): {e}")
1418
+ error_msg = f"❌ {str(e)}"
1419
+ if e.status_code == 403 or "free tier limit" in str(e).lower():
1420
+ if e.detail and "3 free indexing operations" in e.detail:
1421
+ error_msg = f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
1422
+ else:
1423
+ error_msg += "\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
1424
+ return [TextContent(type="text", text=error_msg)]
1425
+ except Exception as e:
1426
+ logger.error(f"Error in manage_resource ({action}): {e}")
1427
+ error_msg = str(e)
1428
+ if "indexing operations" in error_msg.lower() or "lifetime limit" in error_msg.lower():
884
1429
  return [TextContent(
885
1430
  type="text",
886
- text=f" Successfully renamed {resource_desc} to '{new_name}'"
1431
+ text=f" {error_msg}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
887
1432
  )]
888
- else:
1433
+ return [TextContent(
1434
+ type="text",
1435
+ text=f"❌ Error in {action} operation: {error_msg}"
1436
+ )]
1437
+
1438
+ @mcp.tool()
1439
+ async def get_github_file_tree(
1440
+ repository: str,
1441
+ branch: Optional[str] = None,
1442
+ include_paths: Optional[List[str]] = None,
1443
+ exclude_paths: Optional[List[str]] = None,
1444
+ file_extensions: Optional[List[str]] = None,
1445
+ exclude_extensions: Optional[List[str]] = None,
1446
+ show_full_paths: bool = False
1447
+ ) -> List[TextContent]:
1448
+ """
1449
+ Get file and folder structure directly from GitHub API (no indexing required).
1450
+
1451
+ Args:
1452
+ repository: Repository identifier (owner/repo format, e.g., "facebook/react")
1453
+ branch: Optional branch name (defaults to repository's default branch)
1454
+ include_paths: Only show files in these paths (e.g., ["src/", "lib/"])
1455
+ exclude_paths: Hide files in these paths (e.g., ["node_modules/", "dist/", "test/"])
1456
+ file_extensions: Only show these file types (e.g., [".py", ".js", ".ts"])
1457
+ exclude_extensions: Hide these file types (e.g., [".md", ".lock", ".json"])
1458
+ show_full_paths: Show full paths instead of tree structure (default: False)
1459
+
1460
+ Returns:
1461
+ Filtered file tree structure from GitHub with stats
1462
+ """
1463
+ try:
1464
+ client = await ensure_api_client()
1465
+
1466
+ # Require explicit repository specification
1467
+ if not repository:
889
1468
  return [TextContent(
890
1469
  type="text",
891
- text=f" Failed to rename {resource_type}: {result.get('message', 'Unknown error')}"
1470
+ text="🔍 **Please specify which repository to get file tree from:**\n\n"
1471
+ "Usage: `get_github_file_tree(\"owner/repo\")`\n\n"
1472
+ "**Examples:**\n"
1473
+ "```\n"
1474
+ "get_github_file_tree(\"facebook/react\")\n"
1475
+ "get_github_file_tree(\"microsoft/vscode\", \"main\")\n"
1476
+ "```"
892
1477
  )]
893
1478
 
1479
+ logger.info(f"Getting GitHub tree for repository: {repository}, branch: {branch or 'default'}, filters: {include_paths or exclude_paths or file_extensions or exclude_extensions}")
1480
+
1481
+ # Call API with filters
1482
+ result = await client.get_github_tree(
1483
+ repository,
1484
+ branch=branch,
1485
+ include_paths=include_paths,
1486
+ exclude_paths=exclude_paths,
1487
+ file_extensions=file_extensions,
1488
+ exclude_extensions=exclude_extensions,
1489
+ show_full_paths=show_full_paths
1490
+ )
1491
+
1492
+ # Format response
1493
+ response_text = f"# 📁 GitHub File Tree: {result.get('owner')}/{result.get('repo')}\n\n"
1494
+ response_text += f"**Branch:** `{result.get('branch')}`\n"
1495
+ response_text += f"**SHA:** `{result.get('sha')}`\n"
1496
+ response_text += f"**Retrieved:** {result.get('retrieved_at')}\n"
1497
+ response_text += f"**Source:** GitHub API (always current)\n"
1498
+
1499
+ # Show active filters
1500
+ filters = result.get("filters_applied", {})
1501
+ active_filters = []
1502
+ if filters.get("include_paths"):
1503
+ active_filters.append(f"📂 Included paths: {', '.join(filters['include_paths'])}")
1504
+ if filters.get("exclude_paths"):
1505
+ active_filters.append(f"🚫 Excluded paths: {', '.join(filters['exclude_paths'])}")
1506
+ if filters.get("file_extensions"):
1507
+ active_filters.append(f"📄 File types: {', '.join(filters['file_extensions'])}")
1508
+ if filters.get("exclude_extensions"):
1509
+ active_filters.append(f"🚫 Excluded types: {', '.join(filters['exclude_extensions'])}")
1510
+ if filters.get("show_full_paths"):
1511
+ active_filters.append(f"📍 Showing full paths")
1512
+
1513
+ if active_filters:
1514
+ response_text += f"**Filters:** {' | '.join(active_filters)}\n"
1515
+
1516
+ response_text += "\n"
1517
+
1518
+ # Add stats
1519
+ stats = result.get("stats", {})
1520
+ response_text += "## 📊 Statistics\n\n"
1521
+ response_text += f"- **Total Files:** {stats.get('total_files', 0)}\n"
1522
+ response_text += f"- **Total Directories:** {stats.get('total_directories', 0)}\n"
1523
+ response_text += f"- **Max Depth:** {stats.get('max_depth', 0)} levels\n"
1524
+
1525
+ # File extensions breakdown
1526
+ file_extensions = stats.get("file_extensions", {})
1527
+ if file_extensions:
1528
+ response_text += f"\n**File Types:**\n"
1529
+ sorted_extensions = sorted(file_extensions.items(), key=lambda x: x[1], reverse=True)
1530
+ for ext, count in sorted_extensions[:10]: # Show top 10
1531
+ ext_name = ext if ext != "no_extension" else "(no extension)"
1532
+ response_text += f" - `{ext_name}`: {count} files\n"
1533
+
1534
+ # Tree structure (full)
1535
+ tree_text = result.get("tree_text", "")
1536
+ if tree_text:
1537
+ response_text += "\n## 🌳 Directory Structure\n\n"
1538
+ response_text += "```\n"
1539
+ response_text += tree_text
1540
+ response_text += "\n```\n"
1541
+
1542
+ # Truncation warning
1543
+ if result.get("truncated"):
1544
+ response_text += "\n⚠️ **Note:** Repository is very large. Tree may be truncated by GitHub.\n"
1545
+
1546
+ # Usage hints
1547
+ response_text += "\n---\n"
1548
+ response_text += "💡 **Next Steps:**\n"
1549
+ response_text += f"- Index this repository: `index(\"{repository}\")`\n"
1550
+ response_text += "- Refine with filters (examples below)\n"
1551
+ response_text += "- Use `manage_resource(\"status\", \"repository\", \"{}\")` to check indexing status\n\n".format(repository)
1552
+
1553
+ # Show filter examples if no filters were used
1554
+ if not active_filters:
1555
+ response_text += "**Filter Examples:**\n"
1556
+ response_text += f"- Only Python files: `file_extensions=[\".py\"]`\n"
1557
+ response_text += f"- Exclude tests: `exclude_paths=[\"test/\", \"tests/\"]`\n"
1558
+ response_text += f"- Only src directory: `include_paths=[\"src/\"]`\n"
1559
+ response_text += f"- Full paths: `show_full_paths=True`\n"
1560
+
1561
+ return [TextContent(type="text", text=response_text)]
1562
+
894
1563
  except APIError as e:
895
- logger.error(f"API Error renaming {resource_type}: {e}")
1564
+ logger.error(f"API Error getting GitHub tree: {e}")
896
1565
  error_msg = f"❌ {str(e)}"
897
- if e.status_code == 403 and "lifetime limit" in str(e).lower():
898
- error_msg += "\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
1566
+ if e.status_code == 404:
1567
+ error_msg = f" Repository '{repository}' not found or not accessible.\n\n"
1568
+ error_msg += "**Possible reasons:**\n"
1569
+ error_msg += "- Repository doesn't exist\n"
1570
+ error_msg += "- Repository is private and GitHub App not installed\n"
1571
+ error_msg += "- Invalid owner/repo format\n\n"
1572
+ error_msg += "**Note:** You must have the repository indexed first, or have GitHub App installed."
899
1573
  return [TextContent(type="text", text=error_msg)]
900
1574
  except Exception as e:
901
- logger.error(f"Error renaming {resource_type}: {e}")
1575
+ logger.error(f"Error getting GitHub tree: {e}")
902
1576
  return [TextContent(
903
1577
  type="text",
904
- text=f"❌ Error renaming {resource_type}: {str(e)}"
1578
+ text=f"❌ Error getting GitHub file tree: {str(e)}"
905
1579
  )]
906
1580
 
907
- @mcp.tool()
908
- async def delete_resource(
909
- resource_type: str,
910
- identifier: str
911
- ) -> List[TextContent]:
1581
+ # DEPRECATED: Use manage_resource(action="delete") instead
1582
+ # @mcp.tool()
1583
+ # async def delete_resource(
1584
+ # resource_type: str,
1585
+ # identifier: str
1586
+ # ) -> List[TextContent]:
912
1587
  """
913
1588
  Delete an indexed resource (repository or documentation).
914
1589
 
915
1590
  Args:
916
1591
  resource_type: Type of resource - "repository" or "documentation"
917
1592
  identifier:
918
- - For repository: Repository in owner/repo format (e.g., "facebook/react")
919
- - For documentation: UUID preferred, also supports display name or URL (e.g., "550e8400-e29b-41d4-a716-446655440000", "Vercel AI SDK - Core", or "https://docs.trynia.ai/")
920
-
921
- Returns:
922
- Confirmation of deletion
923
-
924
- Examples:
925
- - delete_resource("repository", "facebook/react")
926
- - delete_resource("documentation", "550e8400-e29b-41d4-a716-446655440000")
927
- - delete_resource("documentation", "https://docs.trynia.ai/")
1593
+ - For repos: Repository in owner/repo format (e.g., "facebook/react")
1594
+ - For docs: UUID preferred, also supports display name or URL
928
1595
  """
929
1596
  try:
930
1597
  # Validate resource type
@@ -967,30 +1634,20 @@ async def delete_resource(
967
1634
  text=f"❌ Error deleting {resource_type}: {str(e)}"
968
1635
  )]
969
1636
 
970
- @mcp.tool()
971
- async def check_resource_status(
972
- resource_type: str,
973
- identifier: str
974
- ) -> List[TextContent]:
1637
+ # DEPRECATED: Use manage_resource(action="status") instead
1638
+ # @mcp.tool()
1639
+ # async def check_resource_status(
1640
+ # resource_type: str,
1641
+ # identifier: str
1642
+ # ) -> List[TextContent]:
975
1643
  """
976
1644
  Check the indexing status of a resource (repository or documentation).
977
1645
 
978
1646
  Args:
979
1647
  resource_type: Type of resource - "repository" or "documentation"
980
1648
  identifier:
981
- - For repository: Repository in owner/repo format (e.g., "facebook/react")
982
- - For documentation: Source ID (UUID format only) - use list_resources to get the UUID
983
-
984
- Returns:
985
- Current status of the resource
986
-
987
- Examples:
988
- - check_resource_status("repository", "facebook/react")
989
- - check_resource_status("documentation", "550e8400-e29b-41d4-a716-446655440000")
990
-
991
- Note:
992
- - Documentation status checking requires UUID identifiers only
993
- - Use list_resources("documentation") to find the UUID for a documentation source
1649
+ - For repos: Repository in owner/repo format
1650
+ - For docs: Source ID (UUID format only)
994
1651
  """
995
1652
  try:
996
1653
  # Validate resource type
@@ -1081,10 +1738,11 @@ async def check_resource_status(
1081
1738
  text=f"❌ Error checking {resource_type} status: {str(e)}"
1082
1739
  )]
1083
1740
 
1084
- @mcp.tool()
1085
- async def list_resources(
1086
- resource_type: Optional[str] = None
1087
- ) -> List[TextContent]:
1741
+ # DEPRECATED: Use manage_resource(action="list") instead
1742
+ # @mcp.tool()
1743
+ # async def list_resources(
1744
+ # resource_type: Optional[str] = None
1745
+ # ) -> List[TextContent]:
1088
1746
  """
1089
1747
  List indexed resources (repositories and/or documentation).
1090
1748
 
@@ -1093,11 +1751,6 @@ async def list_resources(
1093
1751
 
1094
1752
  Returns:
1095
1753
  List of indexed resources with their status
1096
-
1097
- Examples:
1098
- - list_resources() - List all resources
1099
- - list_resources("repository") - List only repositories
1100
- - list_resources("documentation") - List only documentation
1101
1754
  """
1102
1755
  try:
1103
1756
  # Validate resource type if provided
@@ -1219,9 +1872,6 @@ async def list_resources(
1219
1872
 
1220
1873
  Args:
1221
1874
  source_id: Documentation source ID to delete
1222
-
1223
- Returns:
1224
- Confirmation of deletion
1225
1875
  """
1226
1876
  try:
1227
1877
  client = await ensure_api_client()
@@ -1258,9 +1908,6 @@ async def list_resources(
1258
1908
 
1259
1909
  Args:
1260
1910
  repository: Repository in owner/repo format
1261
-
1262
- Returns:
1263
- Confirmation of deletion
1264
1911
  """
1265
1912
  try:
1266
1913
  client = await ensure_api_client()
@@ -1298,9 +1945,6 @@ async def list_resources(
1298
1945
  Args:
1299
1946
  repository: Repository in owner/repo format
1300
1947
  new_name: New display name for the repository (1-100 characters)
1301
-
1302
- Returns:
1303
- Confirmation of rename operation
1304
1948
  """
1305
1949
  try:
1306
1950
  # Validate name length
@@ -1345,9 +1989,6 @@ async def list_resources(
1345
1989
  Args:
1346
1990
  source_id: Documentation source ID
1347
1991
  new_name: New display name for the documentation (1-100 characters)
1348
-
1349
- Returns:
1350
- Confirmation of rename operation
1351
1992
  """
1352
1993
  try:
1353
1994
  # Validate name length
@@ -1393,30 +2034,14 @@ async def nia_web_search(
1393
2034
  find_similar_to: Optional[str] = None
1394
2035
  ) -> List[TextContent]:
1395
2036
  """
1396
- Search repositories, documentation, and other content using AI-powered search.
1397
- Returns results formatted to guide next actions.
1398
-
1399
- USE THIS TOOL WHEN:
1400
- - Finding specific repos/docs/content ("find X library", "trending Y frameworks")
1401
- - Looking for examples or implementations
1402
- - Searching for what's available on a topic
1403
- - Simple, direct searches that need quick results
1404
- - Finding similar content to a known URL
1405
-
1406
- DON'T USE THIS FOR:
1407
- - Comparative analysis (use nia_deep_research_agent instead)
1408
- - Complex multi-faceted questions (use nia_deep_research_agent instead)
1409
- - Questions requiring synthesis of multiple sources (use nia_deep_research_agent instead)
2037
+ Search repositories, documentation, and other content using web search.
1410
2038
 
1411
2039
  Args:
1412
2040
  query: Natural language search query (e.g., "best RAG implementations", "trending rust web frameworks")
1413
2041
  num_results: Number of results to return (default: 5, max: 10)
1414
2042
  category: Filter by category: "github", "company", "research paper", "news", "tweet", "pdf"
1415
2043
  days_back: Only show results from the last N days (for trending content)
1416
- find_similar_to: URL to find similar content to
1417
-
1418
- Returns:
1419
- Search results with actionable next steps
2044
+ find_similar_to: URL to find similar content
1420
2045
  """
1421
2046
  try:
1422
2047
  client = await ensure_api_client()
@@ -1516,13 +2141,7 @@ async def nia_web_search(
1516
2141
 
1517
2142
  # Add prominent call-to-action if we found indexable content
1518
2143
  if github_repos or documentation:
1519
- response_text += "\n## 🎯 **Ready to unlock NIA's AI capabilities?**\n"
1520
- response_text += "The repositories and documentation above can be indexed for:\n"
1521
- response_text += "- 🤖 AI-powered code understanding and search\n"
1522
- response_text += "- 💡 Instant answers to technical questions\n"
1523
- response_text += "- 🔍 Deep architectural insights\n"
1524
- response_text += "- 📚 Smart documentation Q&A\n\n"
1525
- response_text += "**Just copy and paste the index commands above!**\n"
2144
+ response_text += "\n## 🎯 **Ready to unlock nia's tools**\n"
1526
2145
 
1527
2146
  # Add search metadata
1528
2147
  response_text += f"\n---\n"
@@ -1562,27 +2181,6 @@ async def nia_deep_research_agent(
1562
2181
  ) -> List[TextContent]:
1563
2182
  """
1564
2183
  Perform deep, multi-step research on a topic using advanced AI research capabilities.
1565
- Best for complex questions that need comprehensive analysis. Don't just use keywords or unstrctured query, make a comprehensive question to get the best results possible.
1566
-
1567
- USE THIS TOOL WHEN:
1568
- - Comparing multiple options ("compare X vs Y vs Z")
1569
- - Analyzing pros and cons
1570
- - Questions with "best", "top", "which is better"
1571
- - Needing structured analysis or synthesis
1572
- - Complex questions requiring multiple sources
1573
- - Questions about trends, patterns, or developments
1574
- - Requests for comprehensive overviews
1575
-
1576
- DON'T USE THIS FOR:
1577
- - Simple lookups (use nia_web_search instead)
1578
- - Finding a specific known item (use nia_web_search instead)
1579
- - Quick searches for repos/docs (use nia_web_search instead)
1580
-
1581
- COMPLEXITY INDICATORS:
1582
- - Words like: compare, analyze, evaluate, pros/cons, trade-offs
1583
- - Multiple criteria mentioned
1584
- - Asking for recommendations based on context
1585
- - Needing structured output (tables, lists, comparisons)
1586
2184
 
1587
2185
  Args:
1588
2186
  query: Research question (e.g., "Compare top 3 RAG frameworks with pros/cons")
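The removed guidance recommended full, comprehensive questions rather than bare keywords; that advice still applies to the `query` argument. A hedged illustration (both strings below are made up):

```python
# Illustrative nia_deep_research_agent queries; both strings are made up.
too_terse = "RAG frameworks"  # a bare keyword query tends to produce shallow results
comprehensive = (
    "Compare the top 3 RAG frameworks for Python, covering retrieval quality, "
    "ingestion pipelines, and deployment trade-offs, with pros and cons for each."
)
research_args = {"query": comprehensive}
```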
@@ -1663,229 +2261,73 @@ async def nia_deep_research_agent(
1663
2261
  return urls_list
1664
2262
 
1665
2263
  # Extract all URLs from the data
1666
- all_urls = extract_urls_from_data(result["data"])
1667
-
1668
- # Filter for GitHub repos and documentation
1669
- import re
1670
- github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'
1671
-
1672
- for url in all_urls:
1673
- # Check for GitHub repos
1674
- github_match = re.search(github_pattern, url)
1675
- if github_match and '/tree/' not in url and '/blob/' not in url:
1676
- repos_found.append(github_match.group(1))
1677
- # Check for documentation URLs
1678
- elif any(doc_indicator in url.lower() for doc_indicator in ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']):
1679
- docs_found.append(url)
1680
-
1681
- # Remove duplicates and limit results
1682
- repos_found = list(set(repos_found))[:3]
1683
- docs_found = list(set(docs_found))[:3]
1684
-
1685
- if repos_found:
1686
- response_text += "**🚀 DISCOVERED REPOSITORIES - Index with NIA for deep analysis:**\n"
1687
- for repo in repos_found:
1688
- response_text += f"```\nIndex {repo}\n```\n"
1689
- response_text += "✨ Enable AI-powered code search and architecture understanding!\n\n"
1690
-
1691
- if docs_found:
1692
- response_text += "**📖 DISCOVERED DOCUMENTATION - Index with NIA for smart search:**\n"
1693
- for doc in docs_found[:2]: # Limit to 2 for readability
1694
- response_text += f"```\nIndex documentation {doc}\n```\n"
1695
- response_text += "✨ Make documentation instantly searchable with AI Q&A!\n\n"
1696
-
1697
- if not repos_found and not docs_found:
1698
- response_text += "**🔍 Manual indexing options:**\n"
1699
- response_text += "- If you see any GitHub repos mentioned: Say \"Index [owner/repo]\"\n"
1700
- response_text += "- If you see any documentation sites: Say \"Index documentation [url]\"\n"
1701
- response_text += "- These will unlock NIA's powerful AI search capabilities!\n\n"
1702
-
1703
- response_text += "**📊 Other actions:**\n"
1704
- response_text += "- Ask follow-up questions about the research\n"
1705
- response_text += "- Request a different analysis format\n"
1706
- response_text += "- Search for more specific information\n"
1707
- else:
1708
- response_text += "No structured data returned. The research may need a more specific query."
1709
-
1710
- return [TextContent(type="text", text=response_text)]
1711
-
1712
- except APIError as e:
1713
- logger.error(f"API Error in deep research: {e}")
1714
- if e.status_code == 403 or "free tier limit" in str(e).lower() or "indexing operations" in str(e).lower():
1715
- if e.detail and "3 free indexing operations" in e.detail:
1716
- return [TextContent(
1717
- type="text",
1718
- text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
1719
- )]
1720
- else:
1721
- return [TextContent(
1722
- type="text",
1723
- text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
1724
- )]
1725
- else:
1726
- return [TextContent(type="text", text=f"❌ {str(e)}")]
1727
- except Exception as e:
1728
- logger.error(f"Error in deep research: {str(e)}")
1729
- return [TextContent(
1730
- type="text",
1731
- text=f"❌ Research error: {str(e)}\n\n"
1732
- "Try simplifying your question or using the regular nia_web_search tool."
1733
- )]
1734
-
1735
- @mcp.tool()
1736
- async def initialize_project(
1737
- project_root: str,
1738
- profiles: Optional[List[str]] = None
1739
- ) -> List[TextContent]:
1740
- """
1741
- Initialize a NIA-enabled project with IDE-specific rules and configurations.
1742
-
1743
- This tool sets up your project with NIA integration, creating configuration files
1744
- and rules tailored to your IDE or editor. It enables AI assistants to better
1745
- understand and work with NIA's knowledge search capabilities.
1746
-
1747
- Args:
1748
- project_root: Absolute path to the project root directory
1749
- profiles: List of IDE profiles to set up (default: ["cursor"]).
1750
- Options: cursor, vscode, claude, windsurf, cline, codex, zed, jetbrains, neovim, sublime
1751
-
1752
- Returns:
1753
- Status of the initialization with created files and next steps
1754
-
1755
- Examples:
1756
- - Basic: initialize_project("/path/to/project")
1757
- - Multiple IDEs: initialize_project("/path/to/project", profiles=["cursor", "vscode"])
1758
- - Specific IDE: initialize_project("/path/to/project", profiles=["windsurf"])
1759
- """
1760
- try:
1761
- # Validate project root
1762
- project_path = Path(project_root)
1763
- if not project_path.is_absolute():
1764
- return [TextContent(
1765
- type="text",
1766
- text=f"❌ Error: project_root must be an absolute path. Got: {project_root}"
1767
- )]
1768
-
1769
- # Default to cursor profile if none specified
1770
- if profiles is None:
1771
- profiles = ["cursor"]
1772
-
1773
- # Validate profiles
1774
- supported = get_supported_profiles()
1775
- invalid_profiles = [p for p in profiles if p not in supported]
1776
- if invalid_profiles:
1777
- return [TextContent(
1778
- type="text",
1779
- text=f"❌ Unknown profiles: {', '.join(invalid_profiles)}\n\n"
1780
- f"Supported profiles: {', '.join(supported)}"
1781
- )]
1782
-
1783
- logger.info(f"Initializing NIA project at {project_root} with profiles: {profiles}")
1784
-
1785
- # Initialize the project
1786
- result = initialize_nia_project(
1787
- project_root=project_root,
1788
- profiles=profiles
1789
- )
1790
-
1791
- if not result.get("success"):
1792
- return [TextContent(
1793
- type="text",
1794
- text=f"❌ Failed to initialize project: {result.get('error', 'Unknown error')}"
1795
- )]
1796
-
1797
- # Format success response
1798
- response_lines = [
1799
- f"✅ Successfully initialized NIA project at: {project_root}",
1800
- "",
1801
- "## 📁 Created Files:",
1802
- ]
1803
-
1804
- for file in result.get("files_created", []):
1805
- response_lines.append(f"- {file}")
1806
-
1807
- if result.get("profiles_initialized"):
1808
- response_lines.extend([
1809
- "",
1810
- "## 🎨 Initialized Profiles:",
1811
- ])
1812
- for profile in result["profiles_initialized"]:
1813
- response_lines.append(f"- {profile}")
1814
-
1815
- if result.get("warnings"):
1816
- response_lines.extend([
1817
- "",
1818
- "## ⚠️ Warnings:",
1819
- ])
1820
- for warning in result["warnings"]:
1821
- response_lines.append(f"- {warning}")
1822
-
1823
- if result.get("next_steps"):
1824
- response_lines.extend([
1825
- "",
1826
- "## 🚀 Next Steps:",
1827
- ])
1828
- for i, step in enumerate(result["next_steps"], 1):
1829
- response_lines.append(f"{i}. {step}")
1830
-
1831
- # Add profile-specific instructions
1832
- response_lines.extend([
1833
- "",
1834
- "## 💡 Quick Start:",
1835
- ])
1836
-
1837
- if "cursor" in profiles:
1838
- response_lines.extend([
1839
- "**For Cursor:**",
1840
- "1. Restart Cursor to load the NIA MCP server",
1841
- "2. Run `list_repositories` to verify connection",
1842
- "3. Start indexing with `index_repository https://github.com/owner/repo`",
1843
- ""
1844
- ])
1845
-
1846
- if "vscode" in profiles:
1847
- response_lines.extend([
1848
- "**For VSCode:**",
1849
- "1. Reload the VSCode window (Cmd/Ctrl+R)",
1850
- "2. Open command palette (Cmd/Ctrl+Shift+P)",
1851
- "3. Run 'NIA: Index Repository' task",
1852
- ""
1853
- ])
1854
-
1855
- if "claude" in profiles:
1856
- response_lines.extend([
1857
- "**For Claude Desktop:**",
1858
- "1. The .claude directory has been created",
1859
- "2. Claude will now understand NIA commands",
1860
- "3. Try: 'Search for authentication patterns'",
1861
- ""
1862
- ])
1863
-
1864
- # Add general tips
1865
- response_lines.extend([
1866
- "## 📚 Tips:",
1867
- "- Use natural language for searches: 'How does X work?'",
1868
- "- Index repositories before searching them",
1869
- "- Use `nia_web_search` to discover new repositories",
1870
- "- Check `list_repositories` to see what's already indexed",
1871
- "",
1872
- "Ready to supercharge your development with AI-powered code search! 🚀"
1873
- ])
2264
+ all_urls = extract_urls_from_data(result["data"])
2265
+
2266
+ # Filter for GitHub repos and documentation
2267
+ import re
2268
+ github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'
2269
+
2270
+ for url in all_urls:
2271
+ # Check for GitHub repos
2272
+ github_match = re.search(github_pattern, url)
2273
+ if github_match and '/tree/' not in url and '/blob/' not in url:
2274
+ repos_found.append(github_match.group(1))
2275
+ # Check for documentation URLs
2276
+ elif any(doc_indicator in url.lower() for doc_indicator in ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']):
2277
+ docs_found.append(url)
2278
+
2279
+ # Remove duplicates and limit results
2280
+ repos_found = list(set(repos_found))[:3]
2281
+ docs_found = list(set(docs_found))[:3]
2282
+
2283
+ if repos_found:
2284
+ response_text += "**🚀 DISCOVERED REPOSITORIES - Index with NIA for deep analysis:**\n"
2285
+ for repo in repos_found:
2286
+ response_text += f"```\nIndex {repo}\n```\n"
2287
+ response_text += "✨ Enable AI-powered code search and architecture understanding!\n\n"
2288
+
2289
+ if docs_found:
2290
+ response_text += "**📖 DISCOVERED DOCUMENTATION - Index with NIA for smart search:**\n"
2291
+ for doc in docs_found[:2]: # Limit to 2 for readability
2292
+ response_text += f"```\nIndex documentation {doc}\n```\n"
2293
+ response_text += "✨ Make documentation instantly searchable with AI Q&A!\n\n"
2294
+
2295
+ if not repos_found and not docs_found:
2296
+ response_text += "**🔍 Manual indexing options:**\n"
2297
+ response_text += "- If you see any GitHub repos mentioned: Say \"Index [owner/repo]\"\n"
2298
+ response_text += "- If you see any documentation sites: Say \"Index documentation [url]\"\n"
2299
+ response_text += "- These will unlock NIA's powerful AI search capabilities!\n\n"
2300
+
2301
+ response_text += "**📊 Other actions:**\n"
2302
+ response_text += "- Ask follow-up questions about the research\n"
2303
+ response_text += "- Request a different analysis format\n"
2304
+ response_text += "- Search for more specific information\n"
2305
+ else:
2306
+ response_text += "No structured data returned. The research may need a more specific query."
1874
2307
 
1875
- return [TextContent(
1876
- type="text",
1877
- text="\n".join(response_lines)
1878
- )]
2308
+ return [TextContent(type="text", text=response_text)]
1879
2309
 
2310
+ except APIError as e:
2311
+ logger.error(f"API Error in deep research: {e}")
2312
+ if e.status_code == 403 or "free tier limit" in str(e).lower() or "indexing operations" in str(e).lower():
2313
+ if e.detail and "3 free indexing operations" in e.detail:
2314
+ return [TextContent(
2315
+ type="text",
2316
+ text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
2317
+ )]
2318
+ else:
2319
+ return [TextContent(
2320
+ type="text",
2321
+ text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
2322
+ )]
2323
+ else:
2324
+ return [TextContent(type="text", text=f"❌ {str(e)}")]
1880
2325
  except Exception as e:
1881
- logger.error(f"Error in initialize_project tool: {e}")
2326
+ logger.error(f"Error in deep research: {str(e)}")
1882
2327
  return [TextContent(
1883
2328
  type="text",
1884
- text=f"❌ Error initializing project: {str(e)}\n\n"
1885
- "Please check:\n"
1886
- "- The project_root path is correct and accessible\n"
1887
- "- You have write permissions to the directory\n"
1888
- "- The NIA MCP server is properly installed"
2329
+ text=f"❌ Research error: {str(e)}\n\n"
2330
+ "Try simplifying your question or using the regular nia_web_search tool."
1889
2331
  )]
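The re-indented block above keeps the same URL-classification logic as before; a standalone sketch of that logic, using the same regex and documentation indicators but made-up URLs, shows how discovered links are split into repositories and documentation:

```python
import re

# Same pattern and indicators as in the hunk above; the URLs below are made up.
github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'
doc_indicators = ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']

urls = [
    "https://github.com/langchain-ai/langchain",
    "https://github.com/owner/repo/blob/main/README.md",  # skipped: contains /blob/
    "https://fastapi.tiangolo.com/tutorial/",
]

repos_found, docs_found = [], []
for url in urls:
    match = re.search(github_pattern, url)
    if match and '/tree/' not in url and '/blob/' not in url:
        repos_found.append(match.group(1))        # e.g. "langchain-ai/langchain"
    elif any(ind in url.lower() for ind in doc_indicators):
        docs_found.append(url)

print(repos_found)  # ['langchain-ai/langchain']
print(docs_found)   # ['https://fastapi.tiangolo.com/tutorial/']
```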
1890
2332
 
1891
2333
  @mcp.tool()
@@ -1897,22 +2339,15 @@ async def read_source_content(
1897
2339
  """
1898
2340
  Read the full content of a specific source file or document.
1899
2341
 
1900
- This tool allows AI to fetch complete content from sources identified during search,
1901
- enabling deeper analysis when the truncated search results are insufficient.
1902
-
1903
2342
  Args:
1904
2343
  source_type: Type of source - "repository" or "documentation"
1905
2344
  source_identifier:
1906
- - For repository: "owner/repo:path/to/file.py" (e.g., "facebook/react:src/React.js")
2345
+ - For repository: "owner/repo:path/to/file.py"
1907
2346
  - For documentation: The source URL or document ID
1908
2347
  metadata: Optional metadata from search results to help locate the source
1909
2348
 
1910
2349
  Returns:
1911
2350
  Full content of the requested source with metadata
1912
-
1913
- Examples:
1914
- - read_source_content("repository", "langchain-ai/langchain:libs/core/langchain_core/runnables/base.py")
1915
- - read_source_content("documentation", "https://docs.python.org/3/library/asyncio.html")
1916
2351
  """
1917
2352
  try:
1918
2353
  client = await ensure_api_client()
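The trimmed docstring still specifies the `owner/repo:path/to/file.py` identifier format for repository sources; a small sketch of splitting such an identifier (the helper name is hypothetical, the example value comes from the removed docstring line):

```python
# Hypothetical helper illustrating the "owner/repo:path/to/file.py" format described above.
def split_repo_identifier(source_identifier: str) -> tuple[str, str]:
    repo, _, path = source_identifier.partition(":")
    return repo, path

repo, path = split_repo_identifier("facebook/react:src/React.js")
print(repo)  # facebook/react
print(path)  # src/React.js
```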
@@ -2015,7 +2450,6 @@ async def read_source_content(
2015
2450
  type="text",
2016
2451
  text=f"❌ Error reading source content: {str(e)}"
2017
2452
  )]
2018
-
2019
2453
  # @mcp.tool()
2020
2454
  # async def index_local_filesystem(
2021
2455
  # directory_path: str,
@@ -2432,6 +2866,7 @@ async def read_source_content(
2432
2866
  # - npm: Node.js packages from NPM registry
2433
2867
  # - crates_io: Rust packages from crates.io
2434
2868
  # - golang_proxy: Go modules from Go proxy
2869
+ # - ruby_gems: Ruby gems from RubyGems.org
2435
2870
  #
2436
2871
  # Authentication:
2437
2872
  # - Requires CHROMA_API_KEY environment variable
@@ -2467,8 +2902,6 @@ async def nia_package_search_grep(
2467
2902
  Required Args: "registry", "package_name", "pattern" Optional Args: "version", "language",
2468
2903
  "filename_sha256", "a", "b", "c", "head_limit", "output_mode"
2469
2904
 
2470
- Best for: Deterministic code search, finding specific code patterns, or exploring code structure.
2471
-
2472
2905
  Parameters:
2473
2906
  a: The number of lines after a grep match to include
2474
2907
  b: The number of lines before a grep match to include
@@ -2476,23 +2909,19 @@ async def nia_package_search_grep(
2476
2909
  filename_sha256: The sha256 hash of the file to filter for
2477
2910
  head_limit: Limits number of results returned. If the number of results returned is less than the
2478
2911
  head limit, all results have been returned.
2479
- language: The languages to filter for. If not provided, all languages will be searched. Valid
2480
- options: "Rust", "Go", "Python", "JavaScript", "JSX", "TypeScript", "TSX", "HTML", "Markdown",
2481
- "YAML", "Bash", "SQL", "JSON", "Text", "Dockerfile", "HCL", "Protobuf", "Make", "Toml", "Jupyter Notebook"
2912
+ language: The languages to filter for. If not provided, all languages will be searched.
2482
2913
  output_mode: Controls the shape of the grep output. Accepted values:
2483
2914
  "content" (default): return content snippets with line ranges
2484
2915
  "files_with_matches": return unique files (path and sha256) that match
2485
2916
  "count": return files with the count of matches per file
2486
2917
  package_name: The name of the requested package. Pass the name as it appears in the package
2487
2918
  manager. For Go packages, use the GitHub organization and repository name in the format
2488
- {org}/{repo}, if unsure check the GitHub URL for the package and use {org}/{repo} from that URL.
2919
+ {org}/{repo}
2489
2920
  pattern: The regex pattern for exact text matching in the codebase. Must be a valid regex.
2490
2921
  Example: "func\\s+\\(get_repository\\|getRepository\\)\\s*\\(.*?\\)\\s\\{"
2491
2922
  registry: The name of the registry containing the requested package. Must be one of:
2492
- "crates_io", "golang_proxy", "npm", or "py_pi".
2493
- version: Optionally, the specific version of the package whose source code to search.
2494
- If provided, must be in semver format: {major}.{minor}.{patch}. Otherwise, the latest indexed
2495
- version of the package available will be used.
2923
+ "crates_io", "golang_proxy", "npm", "py_pi", or "ruby_gems".
2924
+ version: Optional
2496
2925
  """
2497
2926
  try:
2498
2927
  # Use API client for backend routing
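Given the long parameter list, one plausible call is easier to scan than the prose; the parameter names and allowed registry values come from the docstring above (which now includes ruby_gems), while the package and pattern are only illustrative:

```python
# Illustrative nia_package_search_grep arguments; names come from the docstring above.
grep_args = {
    "registry": "py_pi",            # crates_io | golang_proxy | npm | py_pi | ruby_gems
    "package_name": "requests",     # as it appears in the package manager
    "pattern": r"def\s+get\(",      # must be a valid regex
    "b": 2,                         # lines of context before each match
    "a": 2,                         # lines of context after each match
    "head_limit": 20,               # cap the number of results returned
    "output_mode": "content",       # content | files_with_matches | count
}
```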
@@ -2604,33 +3033,24 @@ async def nia_package_search_hybrid(
2604
3033
  language: Optional[str] = None
2605
3034
  ) -> List[TextContent]:
2606
3035
  """
2607
- Searches package source code using semantic understanding AND optionally regex patterns. This
2608
- allows for hybrid search, allowing for prefiltering with regex, and semantic ranking.
3036
+ Searches package source code using semantic understanding AND optionally regex patterns.
2609
3037
 
2610
3038
  Required Args: "registry", "package_name", "semantic_queries"
2611
3039
 
2612
3040
  Optional Args: "version", "filename_sha256", "pattern", "language"
2613
3041
 
2614
- Best for: Understanding how packages implement specific features, finding usage patterns, or
2615
- exploring code structure.
2616
-
2617
3042
  Parameters:
2618
3043
  filename_sha256: The sha256 hash of the file to filter for
2619
- language: The languages to filter for. If not provided, all languages will be searched. Valid
2620
- options: "Rust", "Go", "Python", "JavaScript", "JSX", "TypeScript", "TSX", "HTML", "Markdown",
2621
- "YAML", "Bash", "SQL", "JSON", "Text", "Dockerfile", "HCL", "Protobuf", "Make", "Toml", "Jupyter Notebook"
2622
- package_name: The name of the requested package. Pass the name as it appears in the package
2623
- manager. For Go packages, use the GitHub organization and repository name in the format
2624
- {org}/{repo}, if unsure check the GitHub URL for the package and use {org}/{repo} from that URL.
3044
+ language: The languages to filter for. If not provided, all languages will be searched.
3045
+ package_name: The name of the requested package. Pass the name as it appears in the package manager. For Go packages, use the GitHub organization and repository name in the format
3046
+ {org}/{repo}
2625
3047
  pattern: The regex pattern for exact text matching in the codebase. Must be a valid regex.
2626
3048
  Example: "func\\s+\\(get_repository\\|getRepository\\)\\s*\\(.*?\\)\\s\\{"
2627
3049
  registry: The name of the registry containing the requested package. Must be one of:
2628
- "crates_io", "golang_proxy", "npm", or "py_pi".
3050
+ "crates_io", "golang_proxy", "npm", "py_pi", or "ruby_gems".
2629
3051
  semantic_queries: Array of 1-5 plain English questions about the codebase. Example: ["how is
2630
3052
  argmax implemented in numpy?", "what testing patterns does axum use?"]
2631
- version: Optionally, the specific version of the package whose source code to search.
2632
- If provided, must be in semver format: {major}.{minor}.{patch}. Otherwise, the latest indexed
2633
- version of the package available will be used.
3053
+ version: Optional
2634
3054
  """
2635
3055
  try:
2636
3056
  # Use API client for backend routing
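A hybrid call pairs one to five plain-English questions with an optional regex prefilter before semantic ranking; the sketch below reuses a question from the docstring, while the package and pattern are illustrative:

```python
# Illustrative nia_package_search_hybrid arguments; "semantic_queries" reuses a
# docstring example, the regex prefilter and package name are made up.
hybrid_args = {
    "registry": "py_pi",
    "package_name": "numpy",
    "semantic_queries": ["how is argmax implemented in numpy?"],  # 1-5 questions
    "pattern": r"def\s+argmax",  # optional regex prefilter
}
```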
@@ -2748,21 +3168,16 @@ async def nia_package_search_read_file(
2748
3168
  Required Args: "registry", "package_name", "filename_sha256", "start_line", "end_line" Optional Args:
2749
3169
  "version"
2750
3170
 
2751
- Best for: Inspecting exact code snippets when you already know the file and line numbers. Max 200
2752
- lines.
2753
-
2754
3171
  Parameters:
2755
3172
  end_line: 1-based inclusive end line to read
2756
3173
  filename_sha256: The sha256 hash of the file to filter for
2757
3174
  package_name: The name of the requested package. Pass the name as it appears in the package
2758
3175
  manager. For Go packages, use the GitHub organization and repository name in the format
2759
- {org}/{repo}, if unsure check the GitHub URL for the package and use {org}/{repo} from that URL.
3176
+ {org}/{repo}
2760
3177
  registry: The name of the registry containing the requested package. Must be one of:
2761
- "crates_io", "golang_proxy", "npm", or "py_pi".
3178
+ "crates_io", "golang_proxy", "npm", "py_pi", or "ruby_gems".
2762
3179
  start_line: 1-based inclusive start line to read
2763
- version: Optionally, the specific version of the package whose source code to search.
2764
- If provided, must be in semver format: {major}.{minor}.{patch}. Otherwise, the latest indexed
2765
- version of the package available will be used.
3180
+ version: Optional
2766
3181
  """
2767
3182
  try:
2768
3183
  # Validate line range
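Since `start_line` and `end_line` are 1-based and inclusive, a sketch of a call that reads a 40-line window looks like this; the sha256 value is a placeholder that would normally come from a prior grep or hybrid result:

```python
# Illustrative nia_package_search_read_file arguments; the sha256 is a placeholder.
read_file_args = {
    "registry": "npm",
    "package_name": "react",
    "filename_sha256": "<sha256 from a previous grep or hybrid result>",
    "start_line": 1,    # 1-based, inclusive
    "end_line": 40,     # 1-based, inclusive
}
```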
@@ -2833,24 +3248,11 @@ async def nia_bug_report(
2833
3248
  additional_context: Optional[str] = None
2834
3249
  ) -> List[TextContent]:
2835
3250
  """
2836
- Submit a bug report or feature request to the Nia development team.
2837
-
2838
- This tool allows users to report bugs, request features, or provide feedback
2839
- directly to the development team. Reports are sent via email and Slack for
2840
- immediate attention.
2841
-
3251
+ Submit a bug report or feature request.
2842
3252
  Args:
2843
3253
  description: Detailed description of the bug or feature request (10-5000 characters)
2844
3254
  bug_type: Type of report - "bug", "feature-request", "improvement", or "other" (default: "bug")
2845
3255
  additional_context: Optional additional context, steps to reproduce, or related information
2846
-
2847
- Returns:
2848
- Confirmation of successful submission with reference ID
2849
-
2850
- Examples:
2851
- - nia_bug_report("The search is not returning any results for my repository")
2852
- - nia_bug_report("Add support for searching within specific file types", "feature-request")
2853
- - nia_bug_report("Repository indexing fails with large repos", "bug", "Happens with repos over 1GB")
2854
3256
  """
2855
3257
  try:
2856
3258
  client = await ensure_api_client()
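The shortened docstring keeps the length and type constraints; a hedged example payload that satisfies them (the report text is made up, loosely echoing an example from the removed docstring):

```python
# Illustrative nia_bug_report arguments; the report text is made up.
bug_report_args = {
    "description": "Repository indexing fails for repositories over roughly 1 GB.",  # 10-5000 chars
    "bug_type": "bug",  # bug | feature-request | improvement | other
    "additional_context": "Reproduced twice on large monorepos; smaller repos index fine.",
}
assert 10 <= len(bug_report_args["description"]) <= 5000
```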
@@ -2934,28 +3336,553 @@ async def nia_bug_report(
2934
3336
  # Context Sharing Tools
2935
3337
 
2936
3338
  @mcp.tool()
2937
- async def save_context(
2938
- title: str,
2939
- summary: str,
2940
- content: str,
2941
- agent_source: str,
3339
+ async def context(
3340
+ action: str,
3341
+ # For save action
3342
+ title: Optional[str] = None,
3343
+ summary: Optional[str] = None,
3344
+ content: Optional[str] = None,
3345
+ agent_source: Optional[str] = None,
2942
3346
  tags: Optional[List[str]] = None,
2943
3347
  metadata: Optional[dict] = None,
2944
3348
  nia_references: Optional[dict] = None,
2945
- edited_files: Optional[List[dict]] = None
3349
+ edited_files: Optional[List[dict]] = None,
3350
+ # For list/search actions
3351
+ limit: int = 20,
3352
+ offset: int = 0,
3353
+ # For retrieve/update/delete actions
3354
+ context_id: Optional[str] = None,
3355
+ # For search action
3356
+ query: Optional[str] = None
2946
3357
  ) -> List[TextContent]:
2947
3358
  """
2948
- Save a conversation context for cross-agent sharing.
3359
+ Unified context management tool for saving, listing, retrieving, searching, updating, and deleting conversation contexts.
3360
+
3361
+ Args:
3362
+ action: Action to perform - "save", "list", "retrieve", "search", "update", or "delete"
3363
+
3364
+ # Save action parameters (all required except tags, metadata, nia_references, edited_files):
3365
+ title: Descriptive title for the context
3366
+ summary: Brief summary of the conversation (10-1000 chars)
3367
+ content: Full conversation context (minimum 50 chars)
3368
+ agent_source: Which agent is creating this context (e.g., "cursor", "claude-code")
3369
+ tags: Optional list of searchable tags
3370
+ metadata: Optional metadata like file paths, repositories discussed, etc.
3371
+ nia_references: Structured data about NIA resources used during conversation
3372
+ edited_files: List of files that were modified during conversation
3373
+
3374
+ # List action parameters (all optional):
3375
+ limit: Number of contexts to return (1-100, default: 20)
3376
+ offset: Number of contexts to skip for pagination (default: 0)
3377
+
3378
+ # Retrieve action parameters:
3379
+ context_id: The unique ID of the context to retrieve (required)
3380
+
3381
+ # Search action parameters:
3382
+ query: Search query to match against title, summary, content, and tags (required)
3383
+ limit: Maximum number of results to return (1-100, default: 20)
3384
+
3385
+ # Update action parameters:
3386
+ context_id: The unique ID of the context to update (required)
3387
+ title: Updated title (optional)
3388
+ summary: Updated summary (optional)
3389
+ content: Updated content (optional)
3390
+ tags: Updated tags list (optional)
3391
+ metadata: Updated metadata (optional)
3392
+
3393
+ # Delete action parameters:
3394
+ context_id: The unique ID of the context to delete (required)
3395
+
3396
+ Examples:
3397
+ # Save context
3398
+ context(action="save", title="API Design", summary="Discussed REST patterns",
3399
+ content="Full conversation...", agent_source="cursor")
3400
+
3401
+ # List contexts
3402
+ context(action="list", limit=10, agent_source="cursor")
3403
+
3404
+ # Retrieve context
3405
+ context(action="retrieve", context_id="550e8400-e29b-41d4-a716-446655440000")
3406
+
3407
+ # Search contexts
3408
+ context(action="search", query="authentication", limit=10)
3409
+
3410
+ # Update context
3411
+ context(action="update", context_id="550e8400...", title="New Title", tags=["updated"])
3412
+
3413
+ # Delete context
3414
+ context(action="delete", context_id="550e8400-e29b-41d4-a716-446655440000")
3415
+ """
3416
+ try:
3417
+ # Validate action
3418
+ valid_actions = ["save", "list", "retrieve", "search", "update", "delete"]
3419
+ if action not in valid_actions:
3420
+ return [TextContent(
3421
+ type="text",
3422
+ text=f"❌ Invalid action: '{action}'. Must be one of: {', '.join(valid_actions)}"
3423
+ )]
3424
+
3425
+ client = await ensure_api_client()
3426
+
3427
+ # ===== SAVE ACTION =====
3428
+ if action == "save":
3429
+ # Validate required parameters
3430
+ if not title or not title.strip():
3431
+ return [TextContent(type="text", text="❌ Error: title is required for save action")]
3432
+ if not summary:
3433
+ return [TextContent(type="text", text="❌ Error: summary is required for save action")]
3434
+ if not content:
3435
+ return [TextContent(type="text", text="❌ Error: content is required for save action")]
3436
+ if not agent_source or not agent_source.strip():
3437
+ return [TextContent(type="text", text="❌ Error: agent_source is required for save action")]
3438
+
3439
+ # Validate field lengths
3440
+ if len(title) > 200:
3441
+ return [TextContent(type="text", text="❌ Error: Title must be 200 characters or less")]
3442
+ if len(summary) < 10 or len(summary) > 1000:
3443
+ return [TextContent(type="text", text="❌ Error: Summary must be 10-1000 characters")]
3444
+ if len(content) < 50:
3445
+ return [TextContent(type="text", text="❌ Error: Content must be at least 50 characters")]
3446
+
3447
+ logger.info(f"Saving context: title='{title}', agent={agent_source}, content_length={len(content)}")
3448
+
3449
+ result = await client.save_context(
3450
+ title=title.strip(),
3451
+ summary=summary.strip(),
3452
+ content=content,
3453
+ agent_source=agent_source.strip(),
3454
+ tags=tags or [],
3455
+ metadata=metadata or {},
3456
+ nia_references=nia_references,
3457
+ edited_files=edited_files or []
3458
+ )
3459
+
3460
+ context_id_result = result.get("id")
3461
+
3462
+ return [TextContent(
3463
+ type="text",
3464
+ text=f"✅ **Context Saved Successfully!**\n\n"
3465
+ f"🆔 **Context ID:** `{context_id_result}`\n"
3466
+ f"📝 **Title:** {title}\n"
3467
+ f"🤖 **Source Agent:** {agent_source}\n"
3468
+ f"📊 **Content Length:** {len(content):,} characters\n"
3469
+ f"🏷️ **Tags:** {', '.join(tags) if tags else 'None'}\n\n"
3470
+ f"**Next Steps:**\n"
3471
+ f"• Other agents can now retrieve this context using the context ID\n"
3472
+ f"• Use `context(action='search', query='...')` to find contexts\n"
3473
+ f"• Use `context(action='list')` to see all your saved contexts\n\n"
3474
+ f"🔗 **Share this context:** Provide the context ID `{context_id_result}` to other agents"
3475
+ )]
3476
+
3477
+ # ===== LIST ACTION =====
3478
+ elif action == "list":
3479
+ # Validate parameters
3480
+ if limit < 1 or limit > 100:
3481
+ return [TextContent(type="text", text="❌ Error: Limit must be between 1 and 100")]
3482
+ if offset < 0:
3483
+ return [TextContent(type="text", text="❌ Error: Offset must be 0 or greater")]
3484
+
3485
+ # Convert tags list to comma-separated string if provided
3486
+ tags_filter = ','.join(tags) if tags and isinstance(tags, list) else (tags if isinstance(tags, str) else None)
3487
+
3488
+ result = await client.list_contexts(
3489
+ limit=limit,
3490
+ offset=offset,
3491
+ tags=tags_filter,
3492
+ agent_source=agent_source
3493
+ )
3494
+
3495
+ contexts = result.get("contexts", [])
3496
+ pagination = result.get("pagination", {})
3497
+
3498
+ if not contexts:
3499
+ response = "📭 **No Contexts Found**\n\n"
3500
+ if tags or agent_source:
3501
+ response += "No contexts match your filters.\n\n"
3502
+ else:
3503
+ response += "You haven't saved any contexts yet.\n\n"
3504
+
3505
+ response += "**Get started:**\n"
3506
+ response += "• Use `context(action='save', ...)` to save a conversation for cross-agent sharing\n"
3507
+ response += "• Perfect for handoffs between Cursor and Claude Code!"
3508
+
3509
+ return [TextContent(type="text", text=response)]
3510
+
3511
+ # Format the response
3512
+ response = f"📚 **Your Conversation Contexts** ({pagination.get('total', len(contexts))} total)\n\n"
3513
+
3514
+ for i, ctx in enumerate(contexts, offset + 1):
3515
+ created_at = ctx.get('created_at', '')
3516
+ if created_at:
3517
+ try:
3518
+ from datetime import datetime
3519
+ dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
3520
+ formatted_date = dt.strftime('%Y-%m-%d %H:%M UTC')
3521
+ except:
3522
+ formatted_date = created_at
3523
+ else:
3524
+ formatted_date = 'Unknown'
3525
+
3526
+ response += f"**{i}. {ctx['title']}**\n"
3527
+ response += f" 🆔 ID: `{ctx['id']}`\n"
3528
+ response += f" 🤖 Source: {ctx['agent_source']}\n"
3529
+ response += f" 📅 Created: {formatted_date}\n"
3530
+ response += f" 📝 Summary: {ctx['summary'][:100]}{'...' if len(ctx['summary']) > 100 else ''}\n"
3531
+ if ctx.get('tags'):
3532
+ response += f" 🏷️ Tags: {', '.join(ctx['tags'])}\n"
3533
+ response += "\n"
3534
+
3535
+ # Add pagination info
3536
+ if pagination.get('has_more'):
3537
+ next_offset = offset + limit
3538
+ response += f"📄 **Pagination:** Showing {offset + 1}-{offset + len(contexts)} of {pagination.get('total')}\n"
3539
+ response += f" Use `context(action='list', offset={next_offset})` for next page\n"
3540
+
3541
+ response += "\n**Actions:**\n"
3542
+ response += "• `context(action='retrieve', context_id='...')` - Get full context\n"
3543
+ response += "• `context(action='search', query='...')` - Search contexts\n"
3544
+ response += "• `context(action='delete', context_id='...')` - Remove context"
3545
+
3546
+ return [TextContent(type="text", text=response)]
3547
+
3548
+ # ===== RETRIEVE ACTION =====
3549
+ elif action == "retrieve":
3550
+ if not context_id or not context_id.strip():
3551
+ return [TextContent(type="text", text="❌ Error: context_id is required for retrieve action")]
3552
+
3553
+ ctx = await client.get_context(context_id.strip())
3554
+
3555
+ if not ctx:
3556
+ return [TextContent(
3557
+ type="text",
3558
+ text=f"❌ **Context Not Found**\n\n"
3559
+ f"Context ID `{context_id}` was not found.\n\n"
3560
+ f"**Possible reasons:**\n"
3561
+ f"• The context ID is incorrect\n"
3562
+ f"• The context belongs to a different user\n"
3563
+ f"• The context has been deleted\n\n"
3564
+ f"Use `context(action='list')` to see your available contexts."
3565
+ )]
3566
+
3567
+ # Format the context display
3568
+ created_at = ctx.get('created_at', '')
3569
+ if created_at:
3570
+ try:
3571
+ from datetime import datetime
3572
+ dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
3573
+ formatted_date = dt.strftime('%Y-%m-%d %H:%M UTC')
3574
+ except:
3575
+ formatted_date = created_at
3576
+ else:
3577
+ formatted_date = 'Unknown'
3578
+
3579
+ updated_at = ctx.get('updated_at', '')
3580
+ formatted_updated = None
3581
+ if updated_at:
3582
+ try:
3583
+ from datetime import datetime
3584
+ dt = datetime.fromisoformat(updated_at.replace('Z', '+00:00'))
3585
+ formatted_updated = dt.strftime('%Y-%m-%d %H:%M UTC')
3586
+ except:
3587
+ formatted_updated = updated_at
3588
+
3589
+ response = f"📋 **Context: {ctx['title']}**\n\n"
3590
+ response += f"🆔 **ID:** `{ctx['id']}`\n"
3591
+ response += f"🤖 **Source Agent:** {ctx['agent_source']}\n"
3592
+ response += f"📅 **Created:** {formatted_date}\n"
3593
+ if formatted_updated:
3594
+ response += f"🔄 **Updated:** {formatted_updated}\n"
3595
+
3596
+ if ctx.get('tags'):
3597
+ response += f"🏷️ **Tags:** {', '.join(ctx['tags'])}\n"
3598
+
3599
+ response += f"\n📝 **Summary:**\n{ctx['summary']}\n\n"
3600
+
3601
+ # Add NIA References
3602
+ nia_refs = ctx.get('nia_references') or {}
3603
+ if nia_refs:
3604
+ response += "🧠 **NIA RESOURCES USED - RECOMMENDED ACTIONS:**\n"
3605
+
3606
+ indexed_resources = nia_refs.get('indexed_resources', [])
3607
+ if indexed_resources:
3608
+ response += "**📦 Re-index these resources:**\n"
3609
+ for resource in indexed_resources:
3610
+ identifier = resource.get('identifier', 'Unknown')
3611
+ resource_type = resource.get('resource_type', 'unknown')
3612
+ purpose = resource.get('purpose', 'No purpose specified')
3613
+
3614
+ if resource_type == 'repository':
3615
+ response += f"• `Index {identifier}` - {purpose}\n"
3616
+ elif resource_type == 'documentation':
3617
+ response += f"• `Index documentation {identifier}` - {purpose}\n"
3618
+ else:
3619
+ response += f"• `Index {identifier}` ({resource_type}) - {purpose}\n"
3620
+ response += "\n"
3621
+
3622
+ search_queries = nia_refs.get('search_queries', [])
3623
+ if search_queries:
3624
+ response += "**🔍 Useful search queries to re-run:**\n"
3625
+ for q in search_queries:
3626
+ query_text = q.get('query', 'Unknown query')
3627
+ query_type = q.get('query_type', 'search')
3628
+ key_findings = q.get('key_findings', 'No findings specified')
3629
+ resources_searched = q.get('resources_searched', [])
3630
+
3631
+ response += f"• **Query:** `{query_text}` ({query_type})\n"
3632
+ if resources_searched:
3633
+ response += f" **Resources:** {', '.join(resources_searched)}\n"
3634
+ response += f" **Key Findings:** {key_findings}\n"
3635
+ response += "\n"
3636
+
3637
+ session_summary = nia_refs.get('session_summary')
3638
+ if session_summary:
3639
+ response += f"**📋 NIA Session Summary:** {session_summary}\n\n"
3640
+
3641
+ # Add Edited Files
3642
+ edited_files_list = ctx.get('edited_files') or []
3643
+ if edited_files_list:
3644
+ response += "📝 **FILES MODIFIED - READ THESE TO GET UP TO SPEED:**\n"
3645
+ for file_info in edited_files_list:
3646
+ file_path = file_info.get('file_path', 'Unknown file')
3647
+ operation = file_info.get('operation', 'modified')
3648
+ changes_desc = file_info.get('changes_description', 'No description')
3649
+ key_changes = file_info.get('key_changes', [])
3650
+ language = file_info.get('language', '')
3651
+
3652
+ operation_emoji = {
3653
+ 'created': '🆕',
3654
+ 'modified': '✏️',
3655
+ 'deleted': '🗑️'
3656
+ }.get(operation, '📄')
3657
+
3658
+ response += f"• {operation_emoji} **`{file_path}`** ({operation})\n"
3659
+ response += f" **Changes:** {changes_desc}\n"
3660
+
3661
+ if key_changes:
3662
+ response += f" **Key Changes:** {', '.join(key_changes)}\n"
3663
+ if language:
3664
+ response += f" **Language:** {language}\n"
3665
+
3666
+ response += f" **💡 Action:** Read this file with: `Read {file_path}`\n"
3667
+ response += "\n"
3668
+
3669
+ # Add metadata if available
3670
+ metadata_dict = ctx.get('metadata') or {}
3671
+ if metadata_dict:
3672
+ response += f"📊 **Additional Metadata:**\n"
3673
+ for key, value in metadata_dict.items():
3674
+ if isinstance(value, list):
3675
+ response += f"• **{key}:** {', '.join(map(str, value))}\n"
3676
+ else:
3677
+ response += f"• **{key}:** {value}\n"
3678
+ response += "\n"
3679
+
3680
+ response += f"📄 **Full Context:**\n\n{ctx['content']}\n\n"
3681
+
3682
+ response += f"---\n"
3683
+ response += f"🚀 **NEXT STEPS FOR SEAMLESS HANDOFF:**\n"
3684
+ response += f"• This context was created by **{ctx['agent_source']}**\n"
3685
+
3686
+ if nia_refs.get('search_queries'):
3687
+ response += f"• **RECOMMENDED:** Re-run the search queries to get the same insights\n"
3688
+ if edited_files_list:
3689
+ response += f"• **ESSENTIAL:** Read the modified files above to understand code changes\n"
3690
+
3691
+ response += f"• Use the summary and full context to understand the strategic planning\n"
3692
+
3693
+ return [TextContent(type="text", text=response)]
3694
+
3695
+ # ===== SEARCH ACTION =====
3696
+ elif action == "search":
3697
+ if not query or not query.strip():
3698
+ return [TextContent(type="text", text="❌ Error: query is required for search action")]
3699
+
3700
+ if limit < 1 or limit > 100:
3701
+ return [TextContent(type="text", text="❌ Error: Limit must be between 1 and 100")]
3702
+
3703
+ # Convert tags list to comma-separated string if provided
3704
+ tags_filter = ','.join(tags) if tags and isinstance(tags, list) else (tags if isinstance(tags, str) else None)
3705
+
3706
+ result = await client.search_contexts(
3707
+ query=query.strip(),
3708
+ limit=limit,
3709
+ tags=tags_filter,
3710
+ agent_source=agent_source
3711
+ )
3712
+
3713
+ contexts = result.get("contexts", [])
3714
+
3715
+ if not contexts:
3716
+ response = f"🔍 **No Results Found**\n\n"
3717
+ response += f"No contexts match your search query: \"{query}\"\n\n"
3718
+
3719
+ if tags or agent_source:
3720
+ response += f"**Active filters:**\n"
3721
+ if tags:
3722
+ response += f"• Tags: {tags if isinstance(tags, str) else ', '.join(tags)}\n"
3723
+ if agent_source:
3724
+ response += f"• Agent: {agent_source}\n"
3725
+ response += "\n"
3726
+
3727
+ response += f"**Suggestions:**\n"
3728
+ response += f"• Try different keywords\n"
3729
+ response += f"• Remove filters to broaden search\n"
3730
+ response += f"• Use `context(action='list')` to see all contexts"
3731
+
3732
+ return [TextContent(type="text", text=response)]
3733
+
3734
+ # Format search results
3735
+ response = f"🔍 **Search Results for \"{query}\"** ({len(contexts)} found)\n\n"
3736
+
3737
+ for i, ctx in enumerate(contexts, 1):
3738
+ created_at = ctx.get('created_at', '')
3739
+ if created_at:
3740
+ try:
3741
+ from datetime import datetime
3742
+ dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
3743
+ formatted_date = dt.strftime('%Y-%m-%d %H:%M UTC')
3744
+ except:
3745
+ formatted_date = created_at
3746
+ else:
3747
+ formatted_date = 'Unknown'
3748
+
3749
+ response += f"**{i}. {ctx['title']}**\n"
3750
+ response += f" 🆔 ID: `{ctx['id']}`\n"
3751
+ response += f" 🤖 Source: {ctx['agent_source']}\n"
3752
+ response += f" 📅 Created: {formatted_date}\n"
3753
+ response += f" 📝 Summary: {ctx['summary'][:150]}{'...' if len(ctx['summary']) > 150 else ''}\n"
3754
+
3755
+ if ctx.get('tags'):
3756
+ response += f" 🏷️ Tags: {', '.join(ctx['tags'])}\n"
3757
+
3758
+ response += "\n"
3759
+
3760
+ response += f"**Actions:**\n"
3761
+ response += f"• `context(action='retrieve', context_id='...')` - Get full context\n"
3762
+ response += f"• Refine search with different keywords\n"
3763
+ response += f"• Use tags or agent filters for better results"
3764
+
3765
+ return [TextContent(type="text", text=response)]
3766
+
3767
+ # ===== UPDATE ACTION =====
3768
+ elif action == "update":
3769
+ if not context_id or not context_id.strip():
3770
+ return [TextContent(type="text", text="❌ Error: context_id is required for update action")]
3771
+
3772
+ # Check that at least one field is being updated
3773
+ if not any([title, summary, content, tags is not None, metadata is not None]):
3774
+ return [TextContent(
3775
+ type="text",
3776
+ text="❌ Error: At least one field must be provided for update (title, summary, content, tags, or metadata)"
3777
+ )]
3778
+
3779
+ # Validate fields if provided
3780
+ if title is not None and (not title.strip() or len(title) > 200):
3781
+ return [TextContent(type="text", text="❌ Error: Title must be 1-200 characters")]
3782
+
3783
+ if summary is not None and (len(summary) < 10 or len(summary) > 1000):
3784
+ return [TextContent(type="text", text="❌ Error: Summary must be 10-1000 characters")]
3785
+
3786
+ if content is not None and len(content) < 50:
3787
+ return [TextContent(type="text", text="❌ Error: Content must be at least 50 characters")]
3788
+
3789
+ if tags is not None and len(tags) > 10:
3790
+ return [TextContent(type="text", text="❌ Error: Maximum 10 tags allowed")]
2949
3791
 
2950
- This tool enables agents to save conversation contexts that can be shared
2951
- with other AI agents, creating seamless handoffs between different coding
2952
- environments (e.g., Cursor → Claude Code).
3792
+ result = await client.update_context(
3793
+ context_id=context_id.strip(),
3794
+ title=title.strip() if title else None,
3795
+ summary=summary.strip() if summary else None,
3796
+ content=content,
3797
+ tags=tags,
3798
+ metadata=metadata
3799
+ )
3800
+
3801
+ if not result:
3802
+ return [TextContent(
3803
+ type="text",
3804
+ text=f"❌ Error: Context with ID `{context_id}` not found"
3805
+ )]
3806
+
3807
+ # List updated fields
3808
+ updated_fields = []
3809
+ if title is not None:
3810
+ updated_fields.append("title")
3811
+ if summary is not None:
3812
+ updated_fields.append("summary")
3813
+ if content is not None:
3814
+ updated_fields.append("content")
3815
+ if tags is not None:
3816
+ updated_fields.append("tags")
3817
+ if metadata is not None:
3818
+ updated_fields.append("metadata")
3819
+
3820
+ response = f"✅ **Context Updated Successfully!**\n\n"
3821
+ response += f"🆔 **Context ID:** `{context_id}`\n"
3822
+ response += f"📝 **Title:** {result['title']}\n"
3823
+ response += f"🔄 **Updated Fields:** {', '.join(updated_fields)}\n"
3824
+ response += f"🤖 **Source Agent:** {result['agent_source']}\n\n"
3825
+
3826
+ response += f"**Current Status:**\n"
3827
+ response += f"• **Tags:** {', '.join(result['tags']) if result.get('tags') else 'None'}\n"
3828
+ response += f"• **Content Length:** {len(result['content']):,} characters\n\n"
3829
+
3830
+ response += f"Use `context(action='retrieve', context_id='{context_id}')` to see the full updated context."
3831
+
3832
+ return [TextContent(type="text", text=response)]
3833
+
3834
+ # ===== DELETE ACTION =====
3835
+ elif action == "delete":
3836
+ if not context_id or not context_id.strip():
3837
+ return [TextContent(type="text", text="❌ Error: context_id is required for delete action")]
3838
+
3839
+ success = await client.delete_context(context_id.strip())
3840
+
3841
+ if success:
3842
+ return [TextContent(
3843
+ type="text",
3844
+ text=f"✅ **Context Deleted Successfully!**\n\n"
3845
+ f"🆔 **Context ID:** `{context_id}`\n\n"
3846
+ f"The context has been permanently removed from your account.\n"
3847
+ f"This action cannot be undone.\n\n"
3848
+ f"Use `context(action='list')` to see your remaining contexts."
3849
+ )]
3850
+ else:
3851
+ return [TextContent(
3852
+ type="text",
3853
+ text=f"❌ **Context Not Found**\n\n"
3854
+ f"Context ID `{context_id}` was not found or has already been deleted.\n\n"
3855
+ f"Use `context(action='list')` to see your available contexts."
3856
+ )]
3857
+
3858
+ except APIError as e:
3859
+ logger.error(f"API Error in context ({action}): {e}")
3860
+ return [TextContent(type="text", text=f"❌ API Error: {str(e)}")]
3861
+ except Exception as e:
3862
+ logger.error(f"Error in context ({action}): {e}")
3863
+ return [TextContent(type="text", text=f"❌ Error in {action} operation: {str(e)}")]
3864
+
3865
+ # DEPRECATED: Individual context tools below - use context() with action parameter instead
3866
+
3867
+ # @mcp.tool()
3868
+ # async def save_context(
3869
+ # title: str,
3870
+ # summary: str,
3871
+ # content: str,
3872
+ # agent_source: str,
3873
+ # tags: Optional[List[str]] = None,
3874
+ # metadata: Optional[dict] = None,
3875
+ # nia_references: Optional[dict] = None,
3876
+ # edited_files: Optional[List[dict]] = None
3877
+ # ) -> List[TextContent]:
3878
+ """
3879
+ Save a conversation context for cross-agent sharing.
2953
3880
 
2954
3881
  Args:
2955
- title: A descriptive title for the context (1-200 characters)
2956
- summary: Brief summary of the conversation (10-1000 characters)
3882
+ title: A descriptive title for the context
3883
+ summary: Brief summary of the conversation
2957
3884
  content: Full conversation context - the agent should compact the conversation history but keep all important parts together, as well as code snippets. No excuses.
2958
- agent_source: Which agent is creating this context (e.g., "cursor", "claude-code", "windsurf")
3885
+ agent_source: Which agent is creating this context (e.g., "cursor")
2959
3886
  tags: Optional list of searchable tags
2960
3887
  metadata: Optional metadata like file paths, repositories discussed, etc.
2961
3888
  nia_references: Structured data about NIA resources used during conversation
@@ -2969,20 +3896,6 @@ async def save_context(
2969
3896
 
2970
3897
  Returns:
2971
3898
  Confirmation of successful context save with context ID
2972
-
2973
- Example:
2974
- save_context(
2975
- title="Streaming AI SDK Implementation",
2976
- summary="Planning conversation about implementing streaming responses with AI SDK",
2977
- content="User asked about implementing streaming... [agent should include conversation]",
2978
- agent_source="cursor",
2979
- tags=["streaming", "ai-sdk", "implementation"],
2980
- nia_references={
2981
- "indexed_resources": [{"identifier": "vercel/ai", "resource_type": "repository", "purpose": "Reference for streaming implementation"}],
2982
- "search_queries": [{"query": "streaming API", "query_type": "documentation", "key_findings": "Found useChat hook with streaming"}]
2983
- },
2984
- edited_files=[{"file_path": "src/chat.ts", "operation": "created", "changes_description": "Added streaming chat component"}]
2985
- )
2986
3899
  """
2987
3900
  try:
2988
3901
  # Validate input parameters
@@ -3043,13 +3956,14 @@ async def save_context(
3043
3956
  logger.error(f"Error saving context: {e}")
3044
3957
  return [TextContent(type="text", text=f"❌ Error saving context: {str(e)}")]
3045
3958
 
3046
- @mcp.tool()
3047
- async def list_contexts(
3048
- limit: int = 20,
3049
- offset: int = 0,
3050
- tags: Optional[str] = None,
3051
- agent_source: Optional[str] = None
3052
- ) -> List[TextContent]:
3959
+ # DEPRECATED: Use context(action="list") instead
3960
+ # @mcp.tool()
3961
+ # async def list_contexts(
3962
+ # limit: int = 20,
3963
+ # offset: int = 0,
3964
+ # tags: Optional[str] = None,
3965
+ # agent_source: Optional[str] = None
3966
+ # ) -> List[TextContent]:
3053
3967
  """
3054
3968
  List saved conversation contexts with pagination and filtering.
3055
3969
 
@@ -3061,12 +3975,6 @@ async def list_contexts(
3061
3975
 
3062
3976
  Returns:
3063
3977
  List of conversation contexts with pagination info
3064
-
3065
- Examples:
3066
- - list_contexts() - List recent 20 contexts
3067
- - list_contexts(limit=50) - List recent 50 contexts
3068
- - list_contexts(tags="streaming,ai-sdk") - Filter by tags
3069
- - list_contexts(agent_source="cursor") - Only contexts from Cursor
3070
3978
  """
3071
3979
  try:
3072
3980
  # Validate parameters
@@ -3146,8 +4054,9 @@ async def list_contexts(
3146
4054
  logger.error(f"Error listing contexts: {e}")
3147
4055
  return [TextContent(type="text", text=f"❌ Error listing contexts: {str(e)}")]
3148
4056
 
3149
- @mcp.tool()
3150
- async def retrieve_context(context_id: str) -> List[TextContent]:
4057
+ # DEPRECATED: Use context(action="retrieve") instead
4058
+ # @mcp.tool()
4059
+ # async def retrieve_context(context_id: str) -> List[TextContent]:
3151
4060
  """
3152
4061
  Retrieve a specific conversation context by ID.
3153
4062
 
@@ -3323,19 +4232,17 @@ async def retrieve_context(context_id: str) -> List[TextContent]:
3323
4232
  logger.error(f"Error retrieving context: {e}")
3324
4233
  return [TextContent(type="text", text=f"❌ Error retrieving context: {str(e)}")]
3325
4234
 
3326
- @mcp.tool()
3327
- async def search_contexts(
3328
- query: str,
3329
- limit: int = 20,
3330
- tags: Optional[str] = None,
3331
- agent_source: Optional[str] = None
3332
- ) -> List[TextContent]:
4235
+ # DEPRECATED: Use context(action="search") instead
4236
+ # @mcp.tool()
4237
+ # async def search_contexts(
4238
+ # query: str,
4239
+ # limit: int = 20,
4240
+ # tags: Optional[str] = None,
4241
+ # agent_source: Optional[str] = None
4242
+ # ) -> List[TextContent]:
3333
4243
  """
3334
4244
  Search conversation contexts by content, title, or summary.
3335
4245
 
3336
- Perfect for finding relevant contexts when you remember part of the
3337
- conversation but not the exact context ID.
3338
-
3339
4246
  Args:
3340
4247
  query: Search query to match against title, summary, content, and tags
3341
4248
  limit: Maximum number of results to return (1-100, default: 20)
@@ -3344,11 +4251,6 @@ async def search_contexts(
3344
4251
 
3345
4252
  Returns:
3346
4253
  Search results with matching contexts
3347
-
3348
- Examples:
3349
- - search_contexts("streaming AI SDK")
3350
- - search_contexts("authentication", tags="security,implementation")
3351
- - search_contexts("database", agent_source="cursor")
3352
4254
  """
3353
4255
  try:
3354
4256
  # Validate parameters
@@ -3428,15 +4330,16 @@ async def search_contexts(
3428
4330
  logger.error(f"Error searching contexts: {e}")
3429
4331
  return [TextContent(type="text", text=f"❌ Error searching contexts: {str(e)}")]
3430
4332
 
3431
- @mcp.tool()
3432
- async def update_context(
3433
- context_id: str,
3434
- title: Optional[str] = None,
3435
- summary: Optional[str] = None,
3436
- content: Optional[str] = None,
3437
- tags: Optional[List[str]] = None,
3438
- metadata: Optional[dict] = None
3439
- ) -> List[TextContent]:
4333
+ # DEPRECATED: Use context(action="update") instead
4334
+ # @mcp.tool()
4335
+ # async def update_context(
4336
+ # context_id: str,
4337
+ # title: Optional[str] = None,
4338
+ # summary: Optional[str] = None,
4339
+ # content: Optional[str] = None,
4340
+ # tags: Optional[List[str]] = None,
4341
+ # metadata: Optional[dict] = None
4342
+ # ) -> List[TextContent]:
3440
4343
  """
3441
4344
  Update an existing conversation context.
3442
4345
 
@@ -3450,13 +4353,6 @@ async def update_context(
3450
4353
 
3451
4354
  Returns:
3452
4355
  Confirmation of successful update
3453
-
3454
- Example:
3455
- update_context(
3456
- context_id="550e8400-e29b-41d4-a716-446655440000",
3457
- title="Updated: Streaming AI SDK Implementation",
3458
- tags=["streaming", "ai-sdk", "completed"]
3459
- )
3460
4356
  """
3461
4357
  try:
3462
4358
  if not context_id or not context_id.strip():
@@ -3545,8 +4441,9 @@ async def update_context(
3545
4441
  logger.error(f"Error updating context: {e}")
3546
4442
  return [TextContent(type="text", text=f"❌ Error updating context: {str(e)}")]
3547
4443
 
3548
- @mcp.tool()
3549
- async def delete_context(context_id: str) -> List[TextContent]:
4444
+ # DEPRECATED: Use context(action="delete") instead
4445
+ # @mcp.tool()
4446
+ # async def delete_context(context_id: str) -> List[TextContent]:
3550
4447
  """
3551
4448
  Delete a conversation context.
3552
4449