nia-mcp-server 1.0.26__py3-none-any.whl → 1.0.27__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.


nia_mcp_server/server.py CHANGED
@@ -72,54 +72,188 @@ async def ensure_api_client() -> NIAApiClient:
72
72
  raise ValueError("Failed to validate API key. Check logs for details.")
73
73
  return api_client
74
74
 
75
+ def _detect_resource_type(url: str) -> str:
76
+ """Detect if URL is a GitHub repository or documentation.
77
+
78
+ Args:
79
+ url: The URL to analyze
80
+
81
+ Returns:
82
+ "repository" if GitHub URL or repository pattern, "documentation" otherwise
83
+ """
84
+ import re
85
+ from urllib.parse import urlparse
86
+
87
+ try:
88
+ # First, check for repository-like patterns
89
+ # Pattern 1: owner/repo format (simple case with single slash)
90
+ if re.match(r'^[a-zA-Z0-9_-]+/[a-zA-Z0-9_.-]+$', url):
91
+ return "repository"
92
+
93
+ # Pattern 2: Git SSH format (git@github.com:owner/repo.git)
94
+ if url.startswith('git@'):
95
+ return "repository"
96
+
97
+ # Pattern 3: Git protocol (git://...)
98
+ if url.startswith('git://'):
99
+ return "repository"
100
+
101
+ # Pattern 4: Ends with .git
102
+ if url.endswith('.git'):
103
+ return "repository"
104
+
105
+ # Pattern 5: owner/repo/tree/branch or owner/repo/tree/branch/... format
106
+ if re.match(r'^[a-zA-Z0-9_-]+/[a-zA-Z0-9_.-]+/tree/.+', url):
107
+ return "repository"
108
+
109
+ # Parse as URL for domain-based detection
110
+ parsed = urlparse(url)
111
+ # Only treat as repository if it's actually the github.com domain
112
+ netloc = parsed.netloc.lower()
113
+ if netloc == "github.com" or netloc == "www.github.com":
114
+ return "repository"
115
+
116
+ return "documentation"
117
+ except Exception:
118
+ # Fallback to documentation if parsing fails
119
+ return "documentation"
120
+
75
121
  # Tools
76
122
 
77
123
  @mcp.tool()
78
- async def index_repository(
79
- repo_url: str,
80
- branch: Optional[str] = None
124
+ async def index(
125
+ url: str,
126
+ resource_type: Optional[str] = None,
127
+ branch: Optional[str] = None,
128
+ url_patterns: Optional[List[str]] = None,
129
+ exclude_patterns: Optional[List[str]] = None,
130
+ max_age: Optional[int] = None,
131
+ only_main_content: Optional[bool] = True,
132
+ wait_for: Optional[int] = None,
133
+ include_screenshot: Optional[bool] = None,
134
+ check_llms_txt: Optional[bool] = True,
135
+ llms_txt_strategy: Optional[str] = "prefer"
81
136
  ) -> List[TextContent]:
82
137
  """
138
+ Universal indexing tool - intelligently indexes GitHub repositories or documentation.
139
+
140
+ Auto-detects resource type from URL:
141
+ - GitHub URLs (containing "github.com") → Repository indexing
142
+ - All other URLs → Documentation indexing
143
+
83
144
  Args:
84
- repo_url: GitHub repository URL (e.g., https://github.com/owner/repo or https://github.com/owner/repo/tree/branch)
145
+ url: GitHub repository URL or documentation site URL (required)
146
+ resource_type: Optional override - "repository" or "documentation" (auto-detected if not provided)
147
+
148
+ # Repository-specific parameters:
85
149
  branch: Branch to index (optional, defaults to main branch)
86
150
 
151
+ # Documentation-specific parameters:
152
+ url_patterns: Optional list of URL patterns to include in crawling
153
+ exclude_patterns: Optional list of URL patterns to exclude from crawling
154
+ max_age: Maximum age of cached content in days
155
+ only_main_content: Extract only main content (default: True)
156
+ wait_for: Time to wait for page load in milliseconds
157
+ include_screenshot: Include screenshots of pages
158
+ check_llms_txt: Check for llms.txt file (default: True)
159
+ llms_txt_strategy: Strategy for llms.txt - "prefer", "only", or "ignore" (default: "prefer")
160
+
161
+ Returns:
162
+ Status of the indexing operation
163
+
164
+ Examples:
165
+ # Index a GitHub repository (auto-detected)
166
+ index("https://github.com/owner/repo", branch="main")
167
+
168
+ # Index documentation (auto-detected)
169
+ index("https://docs.example.com", url_patterns=["*/api/*"])
170
+
171
+ # Manual override (if needed for edge cases)
172
+ index("https://github.io/docs", resource_type="documentation")
173
+
87
174
  Important:
88
- - When started indexing, prompt users to either use check_repository_status tool or go to app.trynia.ai to check the status.
175
+ - When indexing starts, use check_resource_status to monitor progress
176
+ - Repository identifier format: owner/repo or owner/repo/tree/branch
89
177
  """
90
178
  try:
91
179
  client = await ensure_api_client()
92
-
93
- # Start indexing
94
- logger.info(f"Starting to index repository: {repo_url}")
95
- result = await client.index_repository(repo_url, branch)
96
-
97
- repository = result.get("repository", repo_url)
98
- status = result.get("status", "unknown")
99
-
100
- if status == "completed":
101
- return [TextContent(
102
- type="text",
103
- text=f"✅ Repository already indexed: {repository}\n"
104
- f"Branch: {result.get('branch', 'main')}\n"
105
- f"You can now search this codebase!"
106
- )]
180
+
181
+ # Detect or validate resource type
182
+ if resource_type:
183
+ if resource_type not in ["repository", "documentation"]:
184
+ return [TextContent(
185
+ type="text",
186
+ text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository' or 'documentation'."
187
+ )]
188
+ detected_type = resource_type
107
189
  else:
108
- # Wait for indexing to complete
109
- return [TextContent(
110
- type="text",
111
- text=f"⏳ Indexing started for: {repository}\n"
112
- f"Branch: {branch or 'default'}\n"
113
- f"Status: {status}\n\n"
114
- f"Use `check_repository_status` to monitor progress."
115
- )]
116
-
190
+ detected_type = _detect_resource_type(url)
191
+
192
+ logger.info(f"Indexing {detected_type}: {url}")
193
+
194
+ # Route to appropriate indexing method
195
+ if detected_type == "repository":
196
+ # Index repository
197
+ result = await client.index_repository(url, branch)
198
+
199
+ repository = result.get("repository", url)
200
+ status = result.get("status", "unknown")
201
+
202
+ if status == "completed":
203
+ return [TextContent(
204
+ type="text",
205
+ text=f"✅ Repository already indexed: {repository}\n"
206
+ f"Branch: {result.get('branch', 'main')}\n"
207
+ f"You can now search this codebase!"
208
+ )]
209
+ else:
210
+ return [TextContent(
211
+ type="text",
212
+ text=f"⏳ Indexing started for: {repository}\n"
213
+ f"Branch: {branch or 'default'}\n"
214
+ f"Status: {status}\n\n"
215
+ f"Use `check_resource_status(\"repository\", \"{repository}\")` to monitor progress."
216
+ )]
217
+
218
+ else: # documentation
219
+ # Index documentation
220
+ result = await client.create_data_source(
221
+ url=url,
222
+ url_patterns=url_patterns,
223
+ exclude_patterns=exclude_patterns,
224
+ max_age=max_age,
225
+ only_main_content=only_main_content,
226
+ wait_for=wait_for,
227
+ include_screenshot=include_screenshot,
228
+ check_llms_txt=check_llms_txt,
229
+ llms_txt_strategy=llms_txt_strategy
230
+ )
231
+
232
+ source_id = result.get("id")
233
+ status = result.get("status", "unknown")
234
+
235
+ if status == "completed":
236
+ return [TextContent(
237
+ type="text",
238
+ text=f"✅ Documentation already indexed: {url}\n"
239
+ f"Source ID: {source_id}\n"
240
+ f"You can now search this documentation!"
241
+ )]
242
+ else:
243
+ return [TextContent(
244
+ type="text",
245
+ text=f"⏳ Documentation indexing started: {url}\n"
246
+ f"Source ID: {source_id}\n"
247
+ f"Status: {status}\n\n"
248
+ f"Use `check_resource_status(\"documentation\", \"{source_id}\")` to monitor progress."
249
+ )]
250
+
117
251
  except APIError as e:
118
- logger.error(f"API Error indexing repository: {e} (status_code={e.status_code}, detail={e.detail})")
252
+ logger.error(f"API Error indexing {detected_type}: {e} (status_code={e.status_code}, detail={e.detail})")
119
253
  if e.status_code == 403 or "free tier limit" in str(e).lower() or "indexing operations" in str(e).lower():
120
254
  if e.detail and "3 free indexing operations" in e.detail:
121
255
  return [TextContent(
122
- type="text",
256
+ type="text",
123
257
  text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
124
258
  )]
125
259
  else:
@@ -130,7 +264,7 @@ async def index_repository(
130
264
  else:
131
265
  return [TextContent(type="text", text=f"❌ {str(e)}")]
132
266
  except Exception as e:
133
- logger.error(f"Unexpected error indexing repository: {e}")
267
+ logger.error(f"Unexpected error indexing: {e}")
134
268
  error_msg = str(e)
135
269
  if "indexing operations" in error_msg.lower() or "lifetime limit" in error_msg.lower():
136
270
  return [TextContent(
@@ -139,9 +273,79 @@ async def index_repository(
139
273
  )]
140
274
  return [TextContent(
141
275
  type="text",
142
- text=f"❌ Error indexing repository: {error_msg}"
276
+ text=f"❌ Error indexing: {error_msg}"
143
277
  )]
144
278
 
279
+ # @mcp.tool()
280
+ # async def index_repository(
281
+ # repo_url: str,
282
+ # branch: Optional[str] = None
283
+ # ) -> List[TextContent]:
284
+ # """
285
+ # DEPRECATED: Use the unified `index` tool instead.
286
+ #
287
+ # Args:
288
+ # repo_url: GitHub repository URL (e.g., https://github.com/owner/repo or https://github.com/owner/repo/tree/branch)
289
+ # branch: Branch to index (optional, defaults to main branch)
290
+ #
291
+ # Important:
292
+ # - When started indexing, prompt users to either use check_repository_status tool or go to app.trynia.ai to check the status.
293
+ # """
294
+ # try:
295
+ # client = await ensure_api_client()
296
+ #
297
+ # # Start indexing
298
+ # logger.info(f"Starting to index repository: {repo_url}")
299
+ # result = await client.index_repository(repo_url, branch)
300
+ #
301
+ # repository = result.get("repository", repo_url)
302
+ # status = result.get("status", "unknown")
303
+ #
304
+ # if status == "completed":
305
+ # return [TextContent(
306
+ # type="text",
307
+ # text=f"✅ Repository already indexed: {repository}\n"
308
+ # f"Branch: {result.get('branch', 'main')}\n"
309
+ # f"You can now search this codebase!"
310
+ # )]
311
+ # else:
312
+ # # Wait for indexing to complete
313
+ # return [TextContent(
314
+ # type="text",
315
+ # text=f"⏳ Indexing started for: {repository}\n"
316
+ # f"Branch: {branch or 'default'}\n"
317
+ # f"Status: {status}\n\n"
318
+ # f"Use `check_repository_status` to monitor progress."
319
+ # )]
320
+ #
321
+ # except APIError as e:
322
+ # logger.error(f"API Error indexing repository: {e} (status_code={e.status_code}, detail={e.detail})")
323
+ # if e.status_code == 403 or "free tier limit" in str(e).lower() or "indexing operations" in str(e).lower():
324
+ # if e.detail and "3 free indexing operations" in e.detail:
325
+ # return [TextContent(
326
+ # type="text",
327
+ # text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
328
+ # )]
329
+ # else:
330
+ # return [TextContent(
331
+ # type="text",
332
+ # text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
333
+ # )]
334
+ # else:
335
+ # return [TextContent(type="text", text=f"❌ {str(e)}")]
336
+ # except Exception as e:
337
+ # logger.error(f"Unexpected error indexing repository: {e}")
338
+ # error_msg = str(e)
339
+ # if "indexing operations" in error_msg.lower() or "lifetime limit" in error_msg.lower():
340
+ # return [TextContent(
341
+ # type="text",
342
+ # text=f"❌ {error_msg}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
343
+ # )]
344
+ # return [TextContent(
345
+ # type="text",
346
+ # text=f"❌ Error indexing repository: {error_msg}"
347
+ # )]
348
+
145
349
  @mcp.tool()
146
350
  async def search_codebase(
147
351
  query: str,
@@ -300,6 +504,164 @@ async def search_codebase(
300
504
  text=f"❌ Error searching codebase: {error_msg}"
301
505
  )]
302
506
 
507
+ @mcp.tool()
508
+ async def regex_search(
509
+ repositories: List[str],
510
+ query: str,
511
+ pattern: Optional[str] = None,
512
+ file_extensions: Optional[List[str]] = None,
513
+ languages: Optional[List[str]] = None,
514
+ max_results: int = 50,
515
+ include_context: bool = True,
516
+ context_lines: int = 3
517
+ ) -> List[TextContent]:
518
+ """
519
+ Perform regex pattern search over indexed repository source code.
520
+
521
+ Args:
522
+ repositories: List of repositories to search (owner/repo format)
523
+ query: Natural language query or regex pattern (e.g., "function handleSubmit", "class UserController", "/async\\s+function/")
524
+ pattern: Optional explicit regex pattern (overrides automatic extraction from query)
525
+ file_extensions: File extensions to filter (e.g., [".js", ".tsx", ".py"])
526
+ languages: Programming languages to filter (e.g., ["python", "javascript", "typescript"])
527
+ max_results: Maximum number of results to return (default: 50)
528
+ include_context: Include surrounding context lines (default: True)
529
+ context_lines: Number of context lines before/after match (default: 3)
530
+
531
+ Returns:
532
+ Regex search results with exact matches, file locations, and context
533
+
534
+ Examples:
535
+ - Natural language: `regex_search(["owner/repo"], "function handleSubmit")`
536
+ - Direct regex: `regex_search(["owner/repo"], "/async\\s+function\\s+\\w+/")`
537
+ - With filters: `regex_search(["owner/repo"], "class Controller", file_extensions=[".py"])`
538
+ """
539
+ try:
540
+ client = await ensure_api_client()
541
+
542
+ # Require explicit repository selection
543
+ if not repositories:
544
+ return [TextContent(
545
+ type="text",
546
+ text="🔍 **Please specify which repositories to search:**\n\n"
547
+ "1. Use `list_repositories` to see available repositories\n"
548
+ "2. Then call `regex_search([\"owner/repo\"], \"pattern\")`\n\n"
549
+ "**Examples:**\n"
550
+ "```python\n"
551
+ "# Natural language pattern\n"
552
+ "regex_search([\"facebook/react\"], \"function useState\")\n\n"
553
+ "# Direct regex pattern\n"
554
+ "regex_search([\"django/django\"], \"/class\\s+\\w+Admin/\")\n\n"
555
+ "# With file filters\n"
556
+ "regex_search([\"owner/repo\"], \"import React\", file_extensions=[\".tsx\"])\n"
557
+ "```"
558
+ )]
559
+
560
+ logger.info(f"Performing regex search in {len(repositories)} repositories for query: {query}")
561
+
562
+ # Call the regex search API
563
+ result = await client.regex_search(
564
+ repositories=repositories,
565
+ query=query,
566
+ pattern=pattern,
567
+ file_extensions=file_extensions,
568
+ languages=languages,
569
+ max_results=max_results,
570
+ include_context=include_context,
571
+ context_lines=context_lines
572
+ )
573
+
574
+ # Check for errors
575
+ if not result.get("success"):
576
+ error_msg = result.get("error", "Unknown error")
577
+ return [TextContent(
578
+ type="text",
579
+ text=f"❌ **Regex Search Error:** {error_msg}"
580
+ )]
581
+
582
+ # Format the results
583
+ response_parts = []
584
+
585
+ # Add search summary
586
+ total_matches = result.get("total_matches", 0)
587
+ total_files = result.get("total_files", 0)
588
+ pattern_used = result.get("pattern", query)
589
+
590
+ summary = f"🔍 **Regex Search Results**\n\n"
591
+ summary += f"**Query:** `{query}`\n"
592
+ if pattern and pattern != query:
593
+ summary += f"**Pattern Used:** `{pattern_used}`\n"
594
+ summary += f"**Matches Found:** {total_matches} matches in {total_files} files\n"
595
+
596
+ if file_extensions:
597
+ summary += f"**File Extensions:** {', '.join(file_extensions)}\n"
598
+ if languages:
599
+ summary += f"**Languages:** {', '.join(languages)}\n"
600
+
601
+ response_parts.append(summary)
602
+ response_parts.append("\n---\n")
603
+
604
+ # Add the actual results
605
+ results = result.get("results", [])
606
+
607
+ if not results:
608
+ response_parts.append("\n📭 No matches found for the given pattern.")
609
+ else:
610
+ # Group results by file
611
+ file_matches = {}
612
+ for match in results:
613
+ file_path = match.get("file_path", "Unknown")
614
+ if file_path not in file_matches:
615
+ file_matches[file_path] = []
616
+ file_matches[file_path].append(match)
617
+
618
+ # Format each file's matches
619
+ for file_path, matches in file_matches.items():
620
+ response_parts.append(f"\n### 📄 `{file_path}`\n")
621
+
622
+ for match in matches:
623
+ line_number = match.get("line_number", 0)
624
+ matched_text = match.get("matched_text", "")
625
+ context = match.get("context", "")
626
+
627
+ response_parts.append(f"\n**Line {line_number}:** `{matched_text}`\n")
628
+
629
+ if include_context and context:
630
+ # Format context with line numbers
631
+ context_start = match.get("context_start_line", line_number)
632
+ response_parts.append("\n```\n")
633
+ context_lines_list = context.split('\n')
634
+ for i, line in enumerate(context_lines_list):
635
+ current_line_num = context_start + i
636
+ marker = ">" if current_line_num == line_number else " "
637
+ response_parts.append(f"{marker}{current_line_num:4d}: {line}\n")
638
+ response_parts.append("```\n")
639
+
640
+ # Add search hints if available
641
+ search_hints = result.get("search_hints", {})
642
+ if search_hints and search_hints.get("is_regex"):
643
+ response_parts.append("\n---\n")
644
+ response_parts.append("💡 **Search Hints:**\n")
645
+ if search_hints.get("case_sensitive"):
646
+ response_parts.append("- Case-sensitive search enabled\n")
647
+ if search_hints.get("whole_word"):
648
+ response_parts.append("- Whole word matching enabled\n")
649
+
650
+ # Combine all parts into final response
651
+ full_response = "".join(response_parts)
652
+
653
+ return [TextContent(
654
+ type="text",
655
+ text=full_response
656
+ )]
657
+
658
+ except Exception as e:
659
+ logger.error(f"Regex search error: {e}", exc_info=True)
660
+ return [TextContent(
661
+ type="text",
662
+ text=f"❌ **Regex Search Error:** {str(e)}"
663
+ )]
664
+
303
665
  @mcp.tool()
304
666
  async def search_documentation(
305
667
  query: str,
@@ -583,74 +945,76 @@ async def search_documentation(
583
945
  text=f"❌ Error checking repository status: {str(e)}"
584
946
  )]
585
947
 
586
- @mcp.tool()
587
- async def index_documentation(
588
- url: str,
589
- url_patterns: Optional[List[str]] = None,
590
- exclude_patterns: Optional[List[str]] = None,
591
- max_age: Optional[int] = None,
592
- only_main_content: Optional[bool] = True,
593
- wait_for: Optional[int] = None,
594
- include_screenshot: Optional[bool] = None,
595
- check_llms_txt: Optional[bool] = True,
596
- llms_txt_strategy: Optional[str] = "prefer"
597
- ) -> List[TextContent]:
598
- """
599
- Index documentation or website for intelligent search.
600
-
601
- Args:
602
- url: URL of the documentation site to index
603
- url_patterns: Optional list of URL patterns to include in crawling
604
- exclude_patterns: Optional list of URL patterns to exclude from crawling
605
- """
606
- try:
607
- client = await ensure_api_client()
608
-
609
- # Create and start indexing
610
- logger.info(f"Starting to index documentation: {url}")
611
- result = await client.create_data_source(
612
- url=url,
613
- url_patterns=url_patterns,
614
- exclude_patterns=exclude_patterns,
615
- max_age=max_age,
616
- only_main_content=only_main_content,
617
- wait_for=wait_for,
618
- include_screenshot=include_screenshot,
619
- check_llms_txt=check_llms_txt,
620
- llms_txt_strategy=llms_txt_strategy
621
- )
622
-
623
- source_id = result.get("id")
624
- status = result.get("status", "unknown")
625
-
626
- if status == "completed":
627
- return [TextContent(
628
- type="text",
629
- text=f"✅ Documentation already indexed: {url}\n"
630
- f"Source ID: {source_id}\n"
631
- f"You can now search this documentation!"
632
- )]
633
- else:
634
- return [TextContent(
635
- type="text",
636
- text=f"⏳ Documentation indexing started: {url}\n"
637
- f"Source ID: {source_id}\n"
638
- f"Status: {status}\n\n"
639
- f"Use `check_documentation_status` to monitor progress."
640
- )]
641
-
642
- except APIError as e:
643
- logger.error(f"API Error indexing documentation: {e}")
644
- error_msg = f"❌ {str(e)}"
645
- if e.status_code == 403 and "lifetime limit" in str(e).lower():
646
- error_msg += "\n\n💡 Tip: You've reached the free tier limit of 3 indexing operations. Upgrade to Pro for unlimited access."
647
- return [TextContent(type="text", text=error_msg)]
648
- except Exception as e:
649
- logger.error(f"Error indexing documentation: {e}")
650
- return [TextContent(
651
- type="text",
652
- text=f"❌ Error indexing documentation: {str(e)}"
653
- )]
948
+ # @mcp.tool()
949
+ # async def index_documentation(
950
+ # url: str,
951
+ # url_patterns: Optional[List[str]] = None,
952
+ # exclude_patterns: Optional[List[str]] = None,
953
+ # max_age: Optional[int] = None,
954
+ # only_main_content: Optional[bool] = True,
955
+ # wait_for: Optional[int] = None,
956
+ # include_screenshot: Optional[bool] = None,
957
+ # check_llms_txt: Optional[bool] = True,
958
+ # llms_txt_strategy: Optional[str] = "prefer"
959
+ # ) -> List[TextContent]:
960
+ # """
961
+ # DEPRECATED: Use the unified `index` tool instead.
962
+ #
963
+ # Index documentation or website for intelligent search.
964
+ #
965
+ # Args:
966
+ # url: URL of the documentation site to index
967
+ # url_patterns: Optional list of URL patterns to include in crawling
968
+ # exclude_patterns: Optional list of URL patterns to exclude from crawling
969
+ # """
970
+ # try:
971
+ # client = await ensure_api_client()
972
+ #
973
+ # # Create and start indexing
974
+ # logger.info(f"Starting to index documentation: {url}")
975
+ # result = await client.create_data_source(
976
+ # url=url,
977
+ # url_patterns=url_patterns,
978
+ # exclude_patterns=exclude_patterns,
979
+ # max_age=max_age,
980
+ # only_main_content=only_main_content,
981
+ # wait_for=wait_for,
982
+ # include_screenshot=include_screenshot,
983
+ # check_llms_txt=check_llms_txt,
984
+ # llms_txt_strategy=llms_txt_strategy
985
+ # )
986
+ #
987
+ # source_id = result.get("id")
988
+ # status = result.get("status", "unknown")
989
+ #
990
+ # if status == "completed":
991
+ # return [TextContent(
992
+ # type="text",
993
+ # text=f" Documentation already indexed: {url}\n"
994
+ # f"Source ID: {source_id}\n"
995
+ # f"You can now search this documentation!"
996
+ # )]
997
+ # else:
998
+ # return [TextContent(
999
+ # type="text",
1000
+ # text=f"⏳ Documentation indexing started: {url}\n"
1001
+ # f"Source ID: {source_id}\n"
1002
+ # f"Status: {status}\n\n"
1003
+ # f"Use `check_documentation_status` to monitor progress."
1004
+ # )]
1005
+ #
1006
+ # except APIError as e:
1007
+ # logger.error(f"API Error indexing documentation: {e}")
1008
+ # error_msg = f" {str(e)}"
1009
+ # if e.status_code == 403 and "lifetime limit" in str(e).lower():
1010
+ # error_msg += "\n\n💡 Tip: You've reached the free tier limit of 3 indexing operations. Upgrade to Pro for unlimited access."
1011
+ # return [TextContent(type="text", text=error_msg)]
1012
+ # except Exception as e:
1013
+ # logger.error(f"Error indexing documentation: {e}")
1014
+ # return [TextContent(
1015
+ # type="text",
1016
+ # text=f"❌ Error indexing documentation: {str(e)}"
1017
+ # )]
654
1018
 
655
1019
  # @mcp.tool()
656
1020
  # async def list_documentation() -> List[TextContent]:
@@ -776,73 +1140,450 @@ async def index_documentation(
776
1140
  # Combined Resource Management Tools
777
1141
 
778
1142
  @mcp.tool()
779
- async def rename_resource(
780
- resource_type: str,
781
- identifier: str,
782
- new_name: str
1143
+ async def manage_resource(
1144
+ action: str,
1145
+ resource_type: Optional[str] = None,
1146
+ identifier: Optional[str] = None,
1147
+ new_name: Optional[str] = None
783
1148
  ) -> List[TextContent]:
784
1149
  """
785
- Rename a resource (repository or documentation) for better organization.
1150
+ Unified resource management tool for repositories and documentation.
786
1151
 
787
1152
  Args:
788
- resource_type: Type of resource - "repository" or "documentation"
789
- identifier:
1153
+ action: Action to perform - "list", "status", "rename", or "delete"
1154
+ resource_type: Type of resource - "repository" or "documentation" (required for status/rename/delete, optional for list)
1155
+ identifier: Resource identifier (required for status/rename/delete):
790
1156
  - For repos: Repository in owner/repo format (e.g., "facebook/react")
791
1157
  - For docs: UUID preferred, also supports display name or URL
1158
+ new_name: New display name (required only for rename action, 1-100 characters)
1159
+
1160
+ Examples:
1161
+ # List all resources
1162
+ manage_resource(action="list")
1163
+
1164
+ # List only repositories
1165
+ manage_resource(action="list", resource_type="repository")
1166
+
1167
+ # Check status
1168
+ manage_resource(action="status", resource_type="repository", identifier="owner/repo")
1169
+
1170
+ # Rename resource
1171
+ manage_resource(action="rename", resource_type="repository", identifier="owner/repo", new_name="My Project")
1172
+
1173
+ # Delete resource
1174
+ manage_resource(action="delete", resource_type="documentation", identifier="uuid-here")
792
1175
  """
793
1176
  try:
794
- # Validate resource type
795
- if resource_type not in ["repository", "documentation"]:
1177
+ # Validate action
1178
+ valid_actions = ["list", "status", "rename", "delete"]
1179
+ if action not in valid_actions:
796
1180
  return [TextContent(
797
1181
  type="text",
798
- text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository' or 'documentation'."
1182
+ text=f"❌ Invalid action: '{action}'. Must be one of: {', '.join(valid_actions)}"
799
1183
  )]
800
1184
 
801
- # Validate name length
802
- if not new_name or len(new_name) > 100:
1185
+ # Validate resource_type when provided
1186
+ if resource_type and resource_type not in ["repository", "documentation"]:
803
1187
  return [TextContent(
804
1188
  type="text",
805
- text="❌ Display name must be between 1 and 100 characters."
1189
+ text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository' or 'documentation'."
806
1190
  )]
807
1191
 
1192
+ # Validate required parameters based on action
1193
+ if action in ["status", "rename", "delete"]:
1194
+ if not resource_type:
1195
+ return [TextContent(
1196
+ type="text",
1197
+ text=f"❌ resource_type is required for action '{action}'"
1198
+ )]
1199
+ if not identifier:
1200
+ return [TextContent(
1201
+ type="text",
1202
+ text=f"❌ identifier is required for action '{action}'"
1203
+ )]
1204
+
1205
+ if action == "rename":
1206
+ if not new_name:
1207
+ return [TextContent(
1208
+ type="text",
1209
+ text="❌ new_name is required for rename action"
1210
+ )]
1211
+ # Validate name length
1212
+ if len(new_name) > 100:
1213
+ return [TextContent(
1214
+ type="text",
1215
+ text="❌ Display name must be between 1 and 100 characters."
1216
+ )]
1217
+
808
1218
  client = await ensure_api_client()
809
1219
 
810
- if resource_type == "repository":
811
- result = await client.rename_repository(identifier, new_name)
812
- resource_desc = f"repository '{identifier}'"
813
- else: # documentation
814
- result = await client.rename_data_source(identifier, new_name)
815
- resource_desc = f"documentation source"
1220
+ # ===== LIST ACTION =====
1221
+ if action == "list":
1222
+ # Validate resource type if provided
1223
+ if resource_type and resource_type not in ["repository", "documentation"]:
1224
+ return [TextContent(
1225
+ type="text",
1226
+ text=f"❌ Invalid resource_type: '{resource_type}'. Must be 'repository', 'documentation', or None for all."
1227
+ )]
1228
+
1229
+ lines = []
1230
+
1231
+ # Determine what to list
1232
+ list_repos = resource_type in [None, "repository"]
1233
+ list_docs = resource_type in [None, "documentation"]
1234
+
1235
+ if list_repos:
1236
+ repositories = await client.list_repositories()
1237
+
1238
+ if repositories:
1239
+ lines.append("# Indexed Repositories\n")
1240
+ for repo in repositories:
1241
+ status_icon = "✅" if repo.get("status") == "completed" else "⏳"
1242
+
1243
+ # Show display name if available, otherwise show repository
1244
+ display_name = repo.get("display_name")
1245
+ repo_name = repo['repository']
1246
+
1247
+ if display_name:
1248
+ lines.append(f"\n## {status_icon} {display_name}")
1249
+ lines.append(f"- **Repository:** {repo_name}")
1250
+ else:
1251
+ lines.append(f"\n## {status_icon} {repo_name}")
1252
+
1253
+ lines.append(f"- **Branch:** {repo.get('branch', 'main')}")
1254
+ lines.append(f"- **Status:** {repo.get('status', 'unknown')}")
1255
+ if repo.get("indexed_at"):
1256
+ lines.append(f"- **Indexed:** {repo['indexed_at']}")
1257
+ if repo.get("error"):
1258
+ lines.append(f"- **Error:** {repo['error']}")
1259
+
1260
+ # Add usage hint for completed repositories
1261
+ if repo.get("status") == "completed":
1262
+ lines.append(f"- **Usage:** `search_codebase(query, [\"{repo_name}\"])`")
1263
+ elif resource_type == "repository":
1264
+ lines.append("No indexed repositories found.\n\n")
1265
+ lines.append("Get started by indexing a repository:\n")
1266
+ lines.append("Use `index` with a GitHub URL.")
1267
+
1268
+ if list_docs:
1269
+ sources = await client.list_data_sources()
1270
+
1271
+ if sources:
1272
+ if lines: # Add separator if we already have repositories
1273
+ lines.append("\n---\n")
1274
+ lines.append("# Indexed Documentation\n")
1275
+
1276
+ for source in sources:
1277
+ status_icon = "✅" if source.get("status") == "completed" else "⏳"
1278
+
1279
+ # Show display name if available, otherwise show URL
1280
+ display_name = source.get("display_name")
1281
+ url = source.get('url', 'Unknown URL')
1282
+
1283
+ if display_name:
1284
+ lines.append(f"\n## {status_icon} {display_name}")
1285
+ lines.append(f"- **URL:** {url}")
1286
+ else:
1287
+ lines.append(f"\n## {status_icon} {url}")
1288
+
1289
+ lines.append(f"- **ID:** {source['id']}")
1290
+ lines.append(f"- **Status:** {source.get('status', 'unknown')}")
1291
+ lines.append(f"- **Type:** {source.get('source_type', 'web')}")
1292
+ if source.get("page_count", 0) > 0:
1293
+ lines.append(f"- **Pages:** {source['page_count']}")
1294
+ if source.get("created_at"):
1295
+ lines.append(f"- **Created:** {source['created_at']}")
1296
+ elif resource_type == "documentation":
1297
+ lines.append("No indexed documentation found.\n\n")
1298
+ lines.append("Get started by indexing documentation:\n")
1299
+ lines.append("Use `index` with a URL.")
1300
+
1301
+ if not lines:
1302
+ lines.append("No indexed resources found.\n\n")
1303
+ lines.append("Get started by indexing:\n")
1304
+ lines.append("- Use `index` for GitHub repos or URLs\n")
1305
+
1306
+ return [TextContent(type="text", text="\n".join(lines))]
1307
+
1308
+ # ===== STATUS ACTION =====
1309
+ elif action == "status":
1310
+ if resource_type == "repository":
1311
+ status = await client.get_repository_status(identifier)
1312
+ if not status:
1313
+ return [TextContent(
1314
+ type="text",
1315
+ text=f"❌ Repository '{identifier}' not found."
1316
+ )]
1317
+ title = f"Repository Status: {identifier}"
1318
+ status_key = "status"
1319
+ else: # documentation
1320
+ status = await client.get_data_source_status(identifier)
1321
+ if not status:
1322
+ return [TextContent(
1323
+ type="text",
1324
+ text=f"❌ Documentation source '{identifier}' not found."
1325
+ )]
1326
+ title = f"Documentation Status: {status.get('url', 'Unknown URL')}"
1327
+ status_key = "status"
1328
+
1329
+ # Format status with appropriate icon
1330
+ status_text = status.get(status_key, "unknown")
1331
+ status_icon = {
1332
+ "completed": "✅",
1333
+ "indexing": "⏳",
1334
+ "processing": "⏳",
1335
+ "failed": "❌",
1336
+ "pending": "🔄",
1337
+ "error": "❌"
1338
+ }.get(status_text, "❓")
1339
+
1340
+ lines = [
1341
+ f"# {title}\n",
1342
+ f"{status_icon} **Status:** {status_text}"
1343
+ ]
1344
+
1345
+ # Add resource-specific fields
1346
+ if resource_type == "repository":
1347
+ lines.append(f"**Branch:** {status.get('branch', 'main')}")
1348
+ if status.get("progress"):
1349
+ progress = status["progress"]
1350
+ if isinstance(progress, dict):
1351
+ lines.append(f"**Progress:** {progress.get('percentage', 0)}%")
1352
+ if progress.get("stage"):
1353
+ lines.append(f"**Stage:** {progress['stage']}")
1354
+ else: # documentation
1355
+ lines.append(f"**Source ID:** {identifier}")
1356
+ if status.get("page_count", 0) > 0:
1357
+ lines.append(f"**Pages Indexed:** {status['page_count']}")
1358
+ if status.get("details"):
1359
+ details = status["details"]
1360
+ if details.get("progress"):
1361
+ lines.append(f"**Progress:** {details['progress']}%")
1362
+ if details.get("stage"):
1363
+ lines.append(f"**Stage:** {details['stage']}")
1364
+
1365
+ # Common fields
1366
+ if status.get("indexed_at"):
1367
+ lines.append(f"**Indexed:** {status['indexed_at']}")
1368
+ elif status.get("created_at"):
1369
+ lines.append(f"**Created:** {status['created_at']}")
1370
+
1371
+ if status.get("error"):
1372
+ lines.append(f"**Error:** {status['error']}")
1373
+
1374
+ return [TextContent(type="text", text="\n".join(lines))]
1375
+
1376
+ # ===== RENAME ACTION =====
1377
+ elif action == "rename":
1378
+ if resource_type == "repository":
1379
+ result = await client.rename_repository(identifier, new_name)
1380
+ resource_desc = f"repository '{identifier}'"
1381
+ else: # documentation
1382
+ result = await client.rename_data_source(identifier, new_name)
1383
+ resource_desc = f"documentation source"
1384
+
1385
+ if result.get("success"):
1386
+ return [TextContent(
1387
+ type="text",
1388
+ text=f"✅ Successfully renamed {resource_desc} to '{new_name}'"
1389
+ )]
1390
+ else:
1391
+ return [TextContent(
1392
+ type="text",
1393
+ text=f"❌ Failed to rename {resource_type}: {result.get('message', 'Unknown error')}"
1394
+ )]
1395
+
1396
+ # ===== DELETE ACTION =====
1397
+ elif action == "delete":
1398
+ if resource_type == "repository":
1399
+ success = await client.delete_repository(identifier)
1400
+ resource_desc = f"repository: {identifier}"
1401
+ else: # documentation
1402
+ success = await client.delete_data_source(identifier)
1403
+ resource_desc = f"documentation source: {identifier}"
1404
+
1405
+ if success:
1406
+ return [TextContent(
1407
+ type="text",
1408
+ text=f"✅ Successfully deleted {resource_desc}"
1409
+ )]
1410
+ else:
1411
+ return [TextContent(
1412
+ type="text",
1413
+ text=f"❌ Failed to delete {resource_desc}"
1414
+ )]
1415
+
1416
+ except APIError as e:
1417
+ logger.error(f"API Error in manage_resource ({action}): {e}")
1418
+ error_msg = f"❌ {str(e)}"
1419
+ if e.status_code == 403 or "free tier limit" in str(e).lower():
1420
+ if e.detail and "3 free indexing operations" in e.detail:
1421
+ error_msg = f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
1422
+ else:
1423
+ error_msg += "\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
1424
+ return [TextContent(type="text", text=error_msg)]
1425
+ except Exception as e:
1426
+ logger.error(f"Error in manage_resource ({action}): {e}")
1427
+ error_msg = str(e)
1428
+ if "indexing operations" in error_msg.lower() or "lifetime limit" in error_msg.lower():
1429
+ return [TextContent(
1430
+ type="text",
1431
+ text=f"❌ {error_msg}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited indexing."
1432
+ )]
1433
+ return [TextContent(
1434
+ type="text",
1435
+ text=f"❌ Error in {action} operation: {error_msg}"
1436
+ )]
1437
+
1438
+ @mcp.tool()
1439
+ async def get_github_file_tree(
1440
+ repository: str,
1441
+ branch: Optional[str] = None,
1442
+ include_paths: Optional[List[str]] = None,
1443
+ exclude_paths: Optional[List[str]] = None,
1444
+ file_extensions: Optional[List[str]] = None,
1445
+ exclude_extensions: Optional[List[str]] = None,
1446
+ show_full_paths: bool = False
1447
+ ) -> List[TextContent]:
1448
+ """
1449
+ Get file and folder structure directly from GitHub API (no indexing required).
1450
+
1451
+ Args:
1452
+ repository: Repository identifier (owner/repo format, e.g., "facebook/react")
1453
+ branch: Optional branch name (defaults to repository's default branch)
1454
+ include_paths: Only show files in these paths (e.g., ["src/", "lib/"])
1455
+ exclude_paths: Hide files in these paths (e.g., ["node_modules/", "dist/", "test/"])
1456
+ file_extensions: Only show these file types (e.g., [".py", ".js", ".ts"])
1457
+ exclude_extensions: Hide these file types (e.g., [".md", ".lock", ".json"])
1458
+ show_full_paths: Show full paths instead of tree structure (default: False)
1459
+
1460
+ Returns:
1461
+ Filtered file tree structure from GitHub with stats
1462
+ """
1463
+ try:
1464
+ client = await ensure_api_client()
816
1465
 
817
- if result.get("success"):
818
- return [TextContent(
819
- type="text",
820
- text=f"✅ Successfully renamed {resource_desc} to '{new_name}'"
821
- )]
822
- else:
1466
+ # Require explicit repository specification
1467
+ if not repository:
823
1468
  return [TextContent(
824
1469
  type="text",
825
- text=f" Failed to rename {resource_type}: {result.get('message', 'Unknown error')}"
1470
+ text="🔍 **Please specify which repository to get file tree from:**\n\n"
1471
+ "Usage: `get_github_file_tree(\"owner/repo\")`\n\n"
1472
+ "**Examples:**\n"
1473
+ "```\n"
1474
+ "get_github_file_tree(\"facebook/react\")\n"
1475
+ "get_github_file_tree(\"microsoft/vscode\", \"main\")\n"
1476
+ "```"
826
1477
  )]
827
1478
 
1479
+ logger.info(f"Getting GitHub tree for repository: {repository}, branch: {branch or 'default'}, filters: {include_paths or exclude_paths or file_extensions or exclude_extensions}")
1480
+
1481
+ # Call API with filters
1482
+ result = await client.get_github_tree(
1483
+ repository,
1484
+ branch=branch,
1485
+ include_paths=include_paths,
1486
+ exclude_paths=exclude_paths,
1487
+ file_extensions=file_extensions,
1488
+ exclude_extensions=exclude_extensions,
1489
+ show_full_paths=show_full_paths
1490
+ )
1491
+
1492
+ # Format response
1493
+ response_text = f"# 📁 GitHub File Tree: {result.get('owner')}/{result.get('repo')}\n\n"
1494
+ response_text += f"**Branch:** `{result.get('branch')}`\n"
1495
+ response_text += f"**SHA:** `{result.get('sha')}`\n"
1496
+ response_text += f"**Retrieved:** {result.get('retrieved_at')}\n"
1497
+ response_text += f"**Source:** GitHub API (always current)\n"
1498
+
1499
+ # Show active filters
1500
+ filters = result.get("filters_applied", {})
1501
+ active_filters = []
1502
+ if filters.get("include_paths"):
1503
+ active_filters.append(f"📂 Included paths: {', '.join(filters['include_paths'])}")
1504
+ if filters.get("exclude_paths"):
1505
+ active_filters.append(f"🚫 Excluded paths: {', '.join(filters['exclude_paths'])}")
1506
+ if filters.get("file_extensions"):
1507
+ active_filters.append(f"📄 File types: {', '.join(filters['file_extensions'])}")
1508
+ if filters.get("exclude_extensions"):
1509
+ active_filters.append(f"🚫 Excluded types: {', '.join(filters['exclude_extensions'])}")
1510
+ if filters.get("show_full_paths"):
1511
+ active_filters.append(f"📍 Showing full paths")
1512
+
1513
+ if active_filters:
1514
+ response_text += f"**Filters:** {' | '.join(active_filters)}\n"
1515
+
1516
+ response_text += "\n"
1517
+
1518
+ # Add stats
1519
+ stats = result.get("stats", {})
1520
+ response_text += "## 📊 Statistics\n\n"
1521
+ response_text += f"- **Total Files:** {stats.get('total_files', 0)}\n"
1522
+ response_text += f"- **Total Directories:** {stats.get('total_directories', 0)}\n"
1523
+ response_text += f"- **Max Depth:** {stats.get('max_depth', 0)} levels\n"
1524
+
1525
+ # File extensions breakdown
1526
+ file_extensions = stats.get("file_extensions", {})
1527
+ if file_extensions:
1528
+ response_text += f"\n**File Types:**\n"
1529
+ sorted_extensions = sorted(file_extensions.items(), key=lambda x: x[1], reverse=True)
1530
+ for ext, count in sorted_extensions[:10]: # Show top 10
1531
+ ext_name = ext if ext != "no_extension" else "(no extension)"
1532
+ response_text += f" - `{ext_name}`: {count} files\n"
1533
+
1534
+ # Tree structure (full)
1535
+ tree_text = result.get("tree_text", "")
1536
+ if tree_text:
1537
+ response_text += "\n## 🌳 Directory Structure\n\n"
1538
+ response_text += "```\n"
1539
+ response_text += tree_text
1540
+ response_text += "\n```\n"
1541
+
1542
+ # Truncation warning
1543
+ if result.get("truncated"):
1544
+ response_text += "\n⚠️ **Note:** Repository is very large. Tree may be truncated by GitHub.\n"
1545
+
1546
+ # Usage hints
1547
+ response_text += "\n---\n"
1548
+ response_text += "💡 **Next Steps:**\n"
1549
+ response_text += f"- Index this repository: `index(\"{repository}\")`\n"
1550
+ response_text += "- Refine with filters (examples below)\n"
1551
+ response_text += "- Use `manage_resource(\"status\", \"repository\", \"{}\")` to check indexing status\n\n".format(repository)
1552
+
1553
+ # Show filter examples if no filters were used
1554
+ if not active_filters:
1555
+ response_text += "**Filter Examples:**\n"
1556
+ response_text += f"- Only Python files: `file_extensions=[\".py\"]`\n"
1557
+ response_text += f"- Exclude tests: `exclude_paths=[\"test/\", \"tests/\"]`\n"
1558
+ response_text += f"- Only src directory: `include_paths=[\"src/\"]`\n"
1559
+ response_text += f"- Full paths: `show_full_paths=True`\n"
1560
+
1561
+ return [TextContent(type="text", text=response_text)]
1562
+
828
1563
  except APIError as e:
829
- logger.error(f"API Error renaming {resource_type}: {e}")
1564
+ logger.error(f"API Error getting GitHub tree: {e}")
830
1565
  error_msg = f"❌ {str(e)}"
831
- if e.status_code == 403 and "lifetime limit" in str(e).lower():
832
- error_msg += "\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
1566
+ if e.status_code == 404:
1567
+ error_msg = f" Repository '{repository}' not found or not accessible.\n\n"
1568
+ error_msg += "**Possible reasons:**\n"
1569
+ error_msg += "- Repository doesn't exist\n"
1570
+ error_msg += "- Repository is private and GitHub App not installed\n"
1571
+ error_msg += "- Invalid owner/repo format\n\n"
1572
+ error_msg += "**Note:** You must have the repository indexed first, or have GitHub App installed."
833
1573
  return [TextContent(type="text", text=error_msg)]
834
1574
  except Exception as e:
835
- logger.error(f"Error renaming {resource_type}: {e}")
1575
+ logger.error(f"Error getting GitHub tree: {e}")
836
1576
  return [TextContent(
837
1577
  type="text",
838
- text=f"❌ Error renaming {resource_type}: {str(e)}"
1578
+ text=f"❌ Error getting GitHub file tree: {str(e)}"
839
1579
  )]
840
1580
 
841
- @mcp.tool()
842
- async def delete_resource(
843
- resource_type: str,
844
- identifier: str
845
- ) -> List[TextContent]:
1581
+ # DEPRECATED: Use manage_resource(action="delete") instead
1582
+ # @mcp.tool()
1583
+ # async def delete_resource(
1584
+ # resource_type: str,
1585
+ # identifier: str
1586
+ # ) -> List[TextContent]:
846
1587
  """
847
1588
  Delete an indexed resource (repository or documentation).
848
1589
 
@@ -893,11 +1634,12 @@ async def delete_resource(
893
1634
  text=f"❌ Error deleting {resource_type}: {str(e)}"
894
1635
  )]
895
1636
 
896
- @mcp.tool()
897
- async def check_resource_status(
898
- resource_type: str,
899
- identifier: str
900
- ) -> List[TextContent]:
1637
+ # DEPRECATED: Use manage_resource(action="status") instead
1638
+ # @mcp.tool()
1639
+ # async def check_resource_status(
1640
+ # resource_type: str,
1641
+ # identifier: str
1642
+ # ) -> List[TextContent]:
901
1643
  """
902
1644
  Check the indexing status of a resource (repository or documentation).
903
1645
 
@@ -996,10 +1738,11 @@ async def check_resource_status(
996
1738
  text=f"❌ Error checking {resource_type} status: {str(e)}"
997
1739
  )]
998
1740
 
999
- @mcp.tool()
1000
- async def list_resources(
1001
- resource_type: Optional[str] = None
1002
- ) -> List[TextContent]:
1741
+ # DEPRECATED: Use manage_resource(action="list") instead
1742
+ # @mcp.tool()
1743
+ # async def list_resources(
1744
+ # resource_type: Optional[str] = None
1745
+ # ) -> List[TextContent]:
1003
1746
  """
1004
1747
  List indexed resources (repositories and/or documentation).
1005
1748
 
@@ -1707,171 +2450,6 @@ async def read_source_content(
1707
2450
  type="text",
1708
2451
  text=f"❌ Error reading source content: {str(e)}"
1709
2452
  )]
1710
-
1711
- @mcp.tool()
1712
- async def get_repository_hierarchy(
1713
- repository: str,
1714
- include_classes: bool = True,
1715
- include_methods: bool = False,
1716
- include_tree: bool = False
1717
- ) -> List[TextContent]:
1718
- """
1719
- Get the file hierarchy for an indexed repository.
1720
-
1721
- Args:
1722
- repository: Repository identifier in owner/repo format (e.g., "facebook/react")
1723
- include_classes: Whether to include class names in the hierarchy (default: True)
1724
- include_methods: Whether to include method names in the hierarchy (default: False)
1725
- include_tree: Whether to include the full tree structure (default: False)
1726
-
1727
- Returns:
1728
- Repository file hierarchy with statistics and structural information
1729
- """
1730
- try:
1731
- client = await ensure_api_client()
1732
-
1733
- logger.info(f"Getting repository hierarchy for: {repository}")
1734
-
1735
- # Get the hierarchy data
1736
- result = await client.get_repository_hierarchy(
1737
- owner_repo=repository,
1738
- include_classes=include_classes,
1739
- include_methods=include_methods,
1740
- include_tree=include_tree
1741
- )
1742
-
1743
- # Format the response
1744
- response_lines = []
1745
-
1746
- # Header
1747
- response_lines.extend([
1748
- f"# 🏗️ Repository File Hierarchy: {repository}",
1749
- "",
1750
- f"**Repository ID:** {result.get('repository_id', 'Unknown')}",
1751
- f"**Generated:** {result.get('generated_at', 'Unknown')}",
1752
- ""
1753
- ])
1754
-
1755
- # Options used
1756
- options = result.get("options", {})
1757
- response_lines.extend([
1758
- "**Options:**",
1759
- f"- Include Classes: {'✅' if options.get('include_classes', include_classes) else '❌'}",
1760
- f"- Include Methods: {'✅' if options.get('include_methods', include_methods) else '❌'}",
1761
- f"- Include Tree: {'✅' if include_tree else '❌'}",
1762
- ""
1763
- ])
1764
-
1765
- # Statistics
1766
- stats = result.get("stats", {})
1767
- if stats:
1768
- response_lines.extend([
1769
- "## 📊 Statistics",
1770
- ""
1771
- ])
1772
-
1773
- if stats.get("total_files"):
1774
- response_lines.append(f"📄 **Total Files:** {stats['total_files']:,}")
1775
- if stats.get("total_directories"):
1776
- response_lines.append(f"📁 **Total Directories:** {stats['total_directories']:,}")
1777
- if stats.get("total_classes"):
1778
- response_lines.append(f"🏷️ **Total Classes:** {stats['total_classes']:,}")
1779
- if stats.get("total_methods"):
1780
- response_lines.append(f"⚙️ **Total Methods:** {stats['total_methods']:,}")
1781
-
1782
- # File types breakdown
1783
- if stats.get("file_types"):
1784
- response_lines.extend([
1785
- "",
1786
- "**File Types:**"
1787
- ])
1788
- file_types = stats["file_types"]
1789
- # Sort by count descending
1790
- sorted_types = sorted(file_types.items(), key=lambda x: x[1], reverse=True)
1791
- for ext, count in sorted_types[:10]: # Show top 10
1792
- response_lines.append(f"- `{ext}`: {count:,} files")
1793
- if len(sorted_types) > 10:
1794
- response_lines.append(f"- ... and {len(sorted_types) - 10} more types")
1795
-
1796
- response_lines.append("")
1797
-
1798
- # Hierarchy text
1799
- hierarchy_text = result.get("hierarchy_text", "")
1800
- if hierarchy_text:
1801
- response_lines.extend([
1802
- "## 🌳 File Hierarchy",
1803
- "",
1804
- "```",
1805
- hierarchy_text,
1806
- "```",
1807
- ""
1808
- ])
1809
-
1810
- # Tree structure if requested and available
1811
- if include_tree and result.get("hierarchy_tree"):
1812
- response_lines.extend([
1813
- "## 🗂️ Detailed Tree Structure",
1814
- "",
1815
- "```json",
1816
- json.dumps(result["hierarchy_tree"], indent=2),
1817
- "```",
1818
- ""
1819
- ])
1820
-
1821
- # Usage tips
1822
- response_lines.extend([
1823
- "## 💡 Usage Tips",
1824
- "",
1825
- "- Use this hierarchy to understand the codebase structure before searching",
1826
- "- Include methods for detailed function discovery",
1827
- "- Use `search_codebase` with specific file paths found here",
1828
- "- Reference file paths when using `read_source_content`"
1829
- ])
1830
-
1831
- return [TextContent(type="text", text="\n".join(response_lines))]
1832
-
1833
- except APIError as e:
1834
- logger.error(f"API Error getting repository hierarchy: {e}")
1835
-
1836
- if e.status_code == 404:
1837
- return [TextContent(
1838
- type="text",
1839
- text=f"❌ Repository '{repository}' not found. Please check:\n\n"
1840
- "1. The repository name is correct (owner/repo format)\n"
1841
- "2. The repository has been indexed\n"
1842
- "3. You have access to this repository\n\n"
1843
- "Use `list_repositories` to see available repositories."
1844
- )]
1845
- elif e.status_code == 400:
1846
- return [TextContent(
1847
- type="text",
1848
- text=f"❌ Repository not ready: {e.detail}\n\n"
1849
- f"The repository may still be indexing. Use `check_resource_status(\"repository\", \"{repository}\")` to check status."
1850
- )]
1851
- elif e.status_code == 403 or "free tier limit" in str(e).lower():
1852
- if e.detail and "3 free indexing operations" in e.detail:
1853
- return [TextContent(
1854
- type="text",
1855
- text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited access."
1856
- )]
1857
- else:
1858
- return [TextContent(
1859
- type="text",
1860
- text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
1861
- )]
1862
- else:
1863
- return [TextContent(type="text", text=f"❌ {str(e)}")]
1864
- except Exception as e:
1865
- logger.error(f"Unexpected error getting repository hierarchy: {e}")
1866
- return [TextContent(
1867
- type="text",
1868
- text=f"❌ Error getting repository hierarchy: {str(e)}\n\n"
1869
- "Please check that:\n"
1870
- "- The repository identifier is correct\n"
1871
- "- The repository is fully indexed\n"
1872
- "- Your API connection is working"
1873
- )]
1874
-
1875
2453
  # @mcp.tool()
1876
2454
  # async def index_local_filesystem(
1877
2455
  # directory_path: str,
@@ -2758,16 +3336,545 @@ async def nia_bug_report(
2758
3336
  # Context Sharing Tools
2759
3337
 
2760
3338
  @mcp.tool()
2761
- async def save_context(
2762
- title: str,
2763
- summary: str,
2764
- content: str,
2765
- agent_source: str,
3339
+ async def context(
3340
+ action: str,
3341
+ # For save action
3342
+ title: Optional[str] = None,
3343
+ summary: Optional[str] = None,
3344
+ content: Optional[str] = None,
3345
+ agent_source: Optional[str] = None,
2766
3346
  tags: Optional[List[str]] = None,
2767
3347
  metadata: Optional[dict] = None,
2768
3348
  nia_references: Optional[dict] = None,
2769
- edited_files: Optional[List[dict]] = None
3349
+ edited_files: Optional[List[dict]] = None,
3350
+ # For list/search actions
3351
+ limit: int = 20,
3352
+ offset: int = 0,
3353
+ # For retrieve/update/delete actions
3354
+ context_id: Optional[str] = None,
3355
+ # For search action
3356
+ query: Optional[str] = None
2770
3357
  ) -> List[TextContent]:
3358
+ """
3359
+ Unified context management tool for saving, listing, retrieving, searching, updating, and deleting conversation contexts.
3360
+
3361
+ Args:
3362
+ action: Action to perform - "save", "list", "retrieve", "search", "update", or "delete"
3363
+
3364
+ # Save action parameters (all required except tags, metadata, nia_references, edited_files):
3365
+ title: Descriptive title for the context
3366
+ summary: Brief summary of the conversation (10-1000 chars)
3367
+ content: Full conversation context (minimum 50 chars)
3368
+ agent_source: Which agent is creating this context (e.g., "cursor", "claude-code")
3369
+ tags: Optional list of searchable tags
3370
+ metadata: Optional metadata like file paths, repositories discussed, etc.
3371
+ nia_references: Structured data about NIA resources used during conversation
3372
+ edited_files: List of files that were modified during conversation
3373
+
3374
+ # List action parameters (all optional):
3375
+ limit: Number of contexts to return (1-100, default: 20)
3376
+ offset: Number of contexts to skip for pagination (default: 0)
3377
+
3378
+ # Retrieve action parameters:
3379
+ context_id: The unique ID of the context to retrieve (required)
3380
+
3381
+ # Search action parameters:
3382
+ query: Search query to match against title, summary, content, and tags (required)
3383
+ limit: Maximum number of results to return (1-100, default: 20)
3384
+
3385
+ # Update action parameters:
3386
+ context_id: The unique ID of the context to update (required)
3387
+ title: Updated title (optional)
3388
+ summary: Updated summary (optional)
3389
+ content: Updated content (optional)
3390
+ tags: Updated tags list (optional)
3391
+ metadata: Updated metadata (optional)
3392
+
3393
+ # Delete action parameters:
3394
+ context_id: The unique ID of the context to delete (required)
3395
+
3396
+ Examples:
3397
+ # Save context
3398
+ context(action="save", title="API Design", summary="Discussed REST patterns",
3399
+ content="Full conversation...", agent_source="cursor")
3400
+
3401
+ # List contexts
3402
+ context(action="list", limit=10, agent_source="cursor")
3403
+
3404
+ # Retrieve context
3405
+ context(action="retrieve", context_id="550e8400-e29b-41d4-a716-446655440000")
3406
+
3407
+ # Search contexts
3408
+ context(action="search", query="authentication", limit=10)
3409
+
3410
+ # Update context
3411
+ context(action="update", context_id="550e8400...", title="New Title", tags=["updated"])
3412
+
3413
+ # Delete context
3414
+ context(action="delete", context_id="550e8400-e29b-41d4-a716-446655440000")
3415
+ """
3416
+ try:
3417
+ # Validate action
3418
+ valid_actions = ["save", "list", "retrieve", "search", "update", "delete"]
3419
+ if action not in valid_actions:
3420
+ return [TextContent(
3421
+ type="text",
3422
+ text=f"❌ Invalid action: '{action}'. Must be one of: {', '.join(valid_actions)}"
3423
+ )]
3424
+
3425
+ client = await ensure_api_client()
3426
+
3427
+ # ===== SAVE ACTION =====
3428
+ if action == "save":
3429
+ # Validate required parameters
3430
+ if not title or not title.strip():
3431
+ return [TextContent(type="text", text="❌ Error: title is required for save action")]
3432
+ if not summary:
3433
+ return [TextContent(type="text", text="❌ Error: summary is required for save action")]
3434
+ if not content:
3435
+ return [TextContent(type="text", text="❌ Error: content is required for save action")]
3436
+ if not agent_source or not agent_source.strip():
3437
+ return [TextContent(type="text", text="❌ Error: agent_source is required for save action")]
3438
+
3439
+ # Validate field lengths
3440
+ if len(title) > 200:
3441
+ return [TextContent(type="text", text="❌ Error: Title must be 200 characters or less")]
3442
+ if len(summary) < 10 or len(summary) > 1000:
3443
+ return [TextContent(type="text", text="❌ Error: Summary must be 10-1000 characters")]
3444
+ if len(content) < 50:
3445
+ return [TextContent(type="text", text="❌ Error: Content must be at least 50 characters")]
3446
+
3447
+ logger.info(f"Saving context: title='{title}', agent={agent_source}, content_length={len(content)}")
3448
+
3449
+ result = await client.save_context(
3450
+ title=title.strip(),
3451
+ summary=summary.strip(),
3452
+ content=content,
3453
+ agent_source=agent_source.strip(),
3454
+ tags=tags or [],
3455
+ metadata=metadata or {},
3456
+ nia_references=nia_references,
3457
+ edited_files=edited_files or []
3458
+ )
3459
+
3460
+ context_id_result = result.get("id")
3461
+
3462
+ return [TextContent(
3463
+ type="text",
3464
+ text=f"✅ **Context Saved Successfully!**\n\n"
3465
+ f"🆔 **Context ID:** `{context_id_result}`\n"
3466
+ f"📝 **Title:** {title}\n"
3467
+ f"🤖 **Source Agent:** {agent_source}\n"
3468
+ f"📊 **Content Length:** {len(content):,} characters\n"
3469
+ f"🏷️ **Tags:** {', '.join(tags) if tags else 'None'}\n\n"
3470
+ f"**Next Steps:**\n"
3471
+ f"• Other agents can now retrieve this context using the context ID\n"
3472
+ f"• Use `context(action='search', query='...')` to find contexts\n"
3473
+ f"• Use `context(action='list')` to see all your saved contexts\n\n"
3474
+ f"🔗 **Share this context:** Provide the context ID `{context_id_result}` to other agents"
3475
+ )]
3476
+
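
For context, the save branch above enforces fixed limits on the payload. The sketch below is illustrative only (it is not part of the package) and simply mirrors those limits so a caller can pre-check a context(action="save") payload:

    def build_save_arguments(title, summary, content, agent_source, tags=None, metadata=None):
        # Mirrors the server-side checks in the save branch above; not shipped with the package.
        if not title.strip() or len(title) > 200:
            raise ValueError("title must be 1-200 characters")
        if not 10 <= len(summary) <= 1000:
            raise ValueError("summary must be 10-1000 characters")
        if len(content) < 50:
            raise ValueError("content must be at least 50 characters")
        if not agent_source.strip():
            raise ValueError("agent_source is required")
        return {
            "action": "save",
            "title": title,
            "summary": summary,
            "content": content,
            "agent_source": agent_source,
            "tags": tags or [],
            "metadata": metadata or {},
        }

    # Example values are invented for illustration.
    payload = build_save_arguments(
        "API Design",
        "Discussed REST patterns for the billing endpoints",
        "Full conversation transcript, at least fifty characters of real content...",
        "cursor",
        tags=["api", "design"],
    )
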
3477
+ # ===== LIST ACTION =====
3478
+ elif action == "list":
3479
+ # Validate parameters
3480
+ if limit < 1 or limit > 100:
3481
+ return [TextContent(type="text", text="❌ Error: Limit must be between 1 and 100")]
3482
+ if offset < 0:
3483
+ return [TextContent(type="text", text="❌ Error: Offset must be 0 or greater")]
3484
+
3485
+ # Convert tags list to comma-separated string if provided
3486
+ tags_filter = ','.join(tags) if tags and isinstance(tags, list) else (tags if isinstance(tags, str) else None)
3487
+
3488
+ result = await client.list_contexts(
3489
+ limit=limit,
3490
+ offset=offset,
3491
+ tags=tags_filter,
3492
+ agent_source=agent_source
3493
+ )
3494
+
3495
+ contexts = result.get("contexts", [])
3496
+ pagination = result.get("pagination", {})
3497
+
3498
+ if not contexts:
3499
+ response = "📭 **No Contexts Found**\n\n"
3500
+ if tags or agent_source:
3501
+ response += "No contexts match your filters.\n\n"
3502
+ else:
3503
+ response += "You haven't saved any contexts yet.\n\n"
3504
+
3505
+ response += "**Get started:**\n"
3506
+ response += "• Use `context(action='save', ...)` to save a conversation for cross-agent sharing\n"
3507
+ response += "• Perfect for handoffs between Cursor and Claude Code!"
3508
+
3509
+ return [TextContent(type="text", text=response)]
3510
+
3511
+ # Format the response
3512
+ response = f"📚 **Your Conversation Contexts** ({pagination.get('total', len(contexts))} total)\n\n"
3513
+
3514
+ for i, ctx in enumerate(contexts, offset + 1):
3515
+ created_at = ctx.get('created_at', '')
3516
+ if created_at:
3517
+ try:
3518
+ from datetime import datetime
3519
+ dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
3520
+ formatted_date = dt.strftime('%Y-%m-%d %H:%M UTC')
3521
+ except:
3522
+ formatted_date = created_at
3523
+ else:
3524
+ formatted_date = 'Unknown'
3525
+
3526
+ response += f"**{i}. {ctx['title']}**\n"
3527
+ response += f" 🆔 ID: `{ctx['id']}`\n"
3528
+ response += f" 🤖 Source: {ctx['agent_source']}\n"
3529
+ response += f" 📅 Created: {formatted_date}\n"
3530
+ response += f" 📝 Summary: {ctx['summary'][:100]}{'...' if len(ctx['summary']) > 100 else ''}\n"
3531
+ if ctx.get('tags'):
3532
+ response += f" 🏷️ Tags: {', '.join(ctx['tags'])}\n"
3533
+ response += "\n"
3534
+
3535
+ # Add pagination info
3536
+ if pagination.get('has_more'):
3537
+ next_offset = offset + limit
3538
+ response += f"📄 **Pagination:** Showing {offset + 1}-{offset + len(contexts)} of {pagination.get('total')}\n"
3539
+ response += f" Use `context(action='list', offset={next_offset})` for next page\n"
3540
+
3541
+ response += "\n**Actions:**\n"
3542
+ response += "• `context(action='retrieve', context_id='...')` - Get full context\n"
3543
+ response += "• `context(action='search', query='...')` - Search contexts\n"
3544
+ response += "• `context(action='delete', context_id='...')` - Remove context"
3545
+
3546
+ return [TextContent(type="text", text=response)]
3547
+
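
A brief sketch of how the list parameters compose under the rules above (limit between 1 and 100, offset advancing by limit, tags sent as a comma-separated string). The helper is illustrative and not shipped with the package:

    def list_page_arguments(offset=0, limit=20, tags=None, agent_source=None):
        # Builds arguments for one context(action="list") call; mirrors the bounds checked above.
        if not 1 <= limit <= 100:
            raise ValueError("limit must be between 1 and 100")
        if offset < 0:
            raise ValueError("offset must be 0 or greater")
        args = {"action": "list", "limit": limit, "offset": offset}
        if tags:
            # The tool expects a comma-separated tag filter string.
            args["tags"] = ",".join(tags) if isinstance(tags, list) else tags
        if agent_source:
            args["agent_source"] = agent_source
        return args

    first_page = list_page_arguments(tags=["api", "design"], agent_source="cursor")
    # When the response reports has_more, request the next page at offset + limit.
    second_page = list_page_arguments(offset=20)
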
3548
+ # ===== RETRIEVE ACTION =====
3549
+ elif action == "retrieve":
3550
+ if not context_id or not context_id.strip():
3551
+ return [TextContent(type="text", text="❌ Error: context_id is required for retrieve action")]
3552
+
3553
+ ctx = await client.get_context(context_id.strip())
3554
+
3555
+ if not ctx:
3556
+ return [TextContent(
3557
+ type="text",
3558
+ text=f"❌ **Context Not Found**\n\n"
3559
+ f"Context ID `{context_id}` was not found.\n\n"
3560
+ f"**Possible reasons:**\n"
3561
+ f"• The context ID is incorrect\n"
3562
+ f"• The context belongs to a different user\n"
3563
+ f"• The context has been deleted\n\n"
3564
+ f"Use `context(action='list')` to see your available contexts."
3565
+ )]
3566
+
3567
+ # Format the context display
3568
+ created_at = ctx.get('created_at', '')
3569
+ if created_at:
3570
+ try:
3571
+ from datetime import datetime
3572
+ dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
3573
+ formatted_date = dt.strftime('%Y-%m-%d %H:%M UTC')
3574
+ except:
3575
+ formatted_date = created_at
3576
+ else:
3577
+ formatted_date = 'Unknown'
3578
+
3579
+ updated_at = ctx.get('updated_at', '')
3580
+ formatted_updated = None
3581
+ if updated_at:
3582
+ try:
3583
+ from datetime import datetime
3584
+ dt = datetime.fromisoformat(updated_at.replace('Z', '+00:00'))
3585
+ formatted_updated = dt.strftime('%Y-%m-%d %H:%M UTC')
3586
+ except:
3587
+ formatted_updated = updated_at
3588
+
3589
+ response = f"📋 **Context: {ctx['title']}**\n\n"
3590
+ response += f"🆔 **ID:** `{ctx['id']}`\n"
3591
+ response += f"🤖 **Source Agent:** {ctx['agent_source']}\n"
3592
+ response += f"📅 **Created:** {formatted_date}\n"
3593
+ if formatted_updated:
3594
+ response += f"🔄 **Updated:** {formatted_updated}\n"
3595
+
3596
+ if ctx.get('tags'):
3597
+ response += f"🏷️ **Tags:** {', '.join(ctx['tags'])}\n"
3598
+
3599
+ response += f"\n📝 **Summary:**\n{ctx['summary']}\n\n"
3600
+
3601
+ # Add NIA References
3602
+ nia_refs = ctx.get('nia_references') or {}
3603
+ if nia_refs:
3604
+ response += "🧠 **NIA RESOURCES USED - RECOMMENDED ACTIONS:**\n"
3605
+
3606
+ indexed_resources = nia_refs.get('indexed_resources', [])
3607
+ if indexed_resources:
3608
+ response += "**📦 Re-index these resources:**\n"
3609
+ for resource in indexed_resources:
3610
+ identifier = resource.get('identifier', 'Unknown')
3611
+ resource_type = resource.get('resource_type', 'unknown')
3612
+ purpose = resource.get('purpose', 'No purpose specified')
3613
+
3614
+ if resource_type == 'repository':
3615
+ response += f"• `Index {identifier}` - {purpose}\n"
3616
+ elif resource_type == 'documentation':
3617
+ response += f"• `Index documentation {identifier}` - {purpose}\n"
3618
+ else:
3619
+ response += f"• `Index {identifier}` ({resource_type}) - {purpose}\n"
3620
+ response += "\n"
3621
+
3622
+ search_queries = nia_refs.get('search_queries', [])
3623
+ if search_queries:
3624
+ response += "**🔍 Useful search queries to re-run:**\n"
3625
+ for q in search_queries:
3626
+ query_text = q.get('query', 'Unknown query')
3627
+ query_type = q.get('query_type', 'search')
3628
+ key_findings = q.get('key_findings', 'No findings specified')
3629
+ resources_searched = q.get('resources_searched', [])
3630
+
3631
+ response += f"• **Query:** `{query_text}` ({query_type})\n"
3632
+ if resources_searched:
3633
+ response += f" **Resources:** {', '.join(resources_searched)}\n"
3634
+ response += f" **Key Findings:** {key_findings}\n"
3635
+ response += "\n"
3636
+
3637
+ session_summary = nia_refs.get('session_summary')
3638
+ if session_summary:
3639
+ response += f"**📋 NIA Session Summary:** {session_summary}\n\n"
3640
+
3641
+ # Add Edited Files
3642
+ edited_files_list = ctx.get('edited_files') or []
3643
+ if edited_files_list:
3644
+ response += "📝 **FILES MODIFIED - READ THESE TO GET UP TO SPEED:**\n"
3645
+ for file_info in edited_files_list:
3646
+ file_path = file_info.get('file_path', 'Unknown file')
3647
+ operation = file_info.get('operation', 'modified')
3648
+ changes_desc = file_info.get('changes_description', 'No description')
3649
+ key_changes = file_info.get('key_changes', [])
3650
+ language = file_info.get('language', '')
3651
+
3652
+ operation_emoji = {
3653
+ 'created': '🆕',
3654
+ 'modified': '✏️',
3655
+ 'deleted': '🗑️'
3656
+ }.get(operation, '📄')
3657
+
3658
+ response += f"• {operation_emoji} **`{file_path}`** ({operation})\n"
3659
+ response += f" **Changes:** {changes_desc}\n"
3660
+
3661
+ if key_changes:
3662
+ response += f" **Key Changes:** {', '.join(key_changes)}\n"
3663
+ if language:
3664
+ response += f" **Language:** {language}\n"
3665
+
3666
+ response += f" **💡 Action:** Read this file with: `Read {file_path}`\n"
3667
+ response += "\n"
3668
+
3669
+ # Add metadata if available
3670
+ metadata_dict = ctx.get('metadata') or {}
3671
+ if metadata_dict:
3672
+ response += f"📊 **Additional Metadata:**\n"
3673
+ for key, value in metadata_dict.items():
3674
+ if isinstance(value, list):
3675
+ response += f"• **{key}:** {', '.join(map(str, value))}\n"
3676
+ else:
3677
+ response += f"• **{key}:** {value}\n"
3678
+ response += "\n"
3679
+
3680
+ response += f"📄 **Full Context:**\n\n{ctx['content']}\n\n"
3681
+
3682
+ response += f"---\n"
3683
+ response += f"🚀 **NEXT STEPS FOR SEAMLESS HANDOFF:**\n"
3684
+ response += f"• This context was created by **{ctx['agent_source']}**\n"
3685
+
3686
+ if nia_refs.get('search_queries'):
3687
+ response += f"• **RECOMMENDED:** Re-run the search queries to get the same insights\n"
3688
+ if edited_files_list:
3689
+ response += f"• **ESSENTIAL:** Read the modified files above to understand code changes\n"
3690
+
3691
+ response += f"• Use the summary and full context to understand the strategic planning\n"
3692
+
3693
+ return [TextContent(type="text", text=response)]
3694
+
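
The retrieve formatting above reads specific keys from nia_references and edited_files. The literals below sketch payloads with those keys populated; the concrete values are invented for illustration, and the shapes assume the fields were supplied when the context was saved:

    nia_references = {
        "indexed_resources": [
            # identifier, resource_type ("repository" or "documentation") and purpose
            {"identifier": "owner/repo", "resource_type": "repository",
             "purpose": "Looked up the retry middleware"},
        ],
        "search_queries": [
            {"query": "how is authentication handled", "query_type": "search",
             "key_findings": "JWTs are validated in the auth middleware",
             "resources_searched": ["owner/repo"]},
        ],
        "session_summary": "Traced the auth flow end to end",
    }

    edited_files = [
        # operation is one of "created", "modified" or "deleted"
        {"file_path": "src/auth/middleware.py", "operation": "modified",
         "changes_description": "Added token refresh handling",
         "key_changes": ["new refresh helper"], "language": "python"},
    ]
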
3695
+ # ===== SEARCH ACTION =====
3696
+ elif action == "search":
3697
+ if not query or not query.strip():
3698
+ return [TextContent(type="text", text="❌ Error: query is required for search action")]
3699
+
3700
+ if limit < 1 or limit > 100:
3701
+ return [TextContent(type="text", text="❌ Error: Limit must be between 1 and 100")]
3702
+
3703
+ # Convert tags list to comma-separated string if provided
3704
+ tags_filter = ','.join(tags) if tags and isinstance(tags, list) else (tags if isinstance(tags, str) else None)
3705
+
3706
+ result = await client.search_contexts(
3707
+ query=query.strip(),
3708
+ limit=limit,
3709
+ tags=tags_filter,
3710
+ agent_source=agent_source
3711
+ )
3712
+
3713
+ contexts = result.get("contexts", [])
3714
+
3715
+ if not contexts:
3716
+ response = f"🔍 **No Results Found**\n\n"
3717
+ response += f"No contexts match your search query: \"{query}\"\n\n"
3718
+
3719
+ if tags or agent_source:
3720
+ response += f"**Active filters:**\n"
3721
+ if tags:
3722
+ response += f"• Tags: {tags if isinstance(tags, str) else ', '.join(tags)}\n"
3723
+ if agent_source:
3724
+ response += f"• Agent: {agent_source}\n"
3725
+ response += "\n"
3726
+
3727
+ response += f"**Suggestions:**\n"
3728
+ response += f"• Try different keywords\n"
3729
+ response += f"• Remove filters to broaden search\n"
3730
+ response += f"• Use `context(action='list')` to see all contexts"
3731
+
3732
+ return [TextContent(type="text", text=response)]
3733
+
3734
+ # Format search results
3735
+ response = f"🔍 **Search Results for \"{query}\"** ({len(contexts)} found)\n\n"
3736
+
3737
+ for i, ctx in enumerate(contexts, 1):
3738
+ created_at = ctx.get('created_at', '')
3739
+ if created_at:
3740
+ try:
3741
+ from datetime import datetime
3742
+ dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
3743
+ formatted_date = dt.strftime('%Y-%m-%d %H:%M UTC')
3744
+ except:
3745
+ formatted_date = created_at
3746
+ else:
3747
+ formatted_date = 'Unknown'
3748
+
3749
+ response += f"**{i}. {ctx['title']}**\n"
3750
+ response += f" 🆔 ID: `{ctx['id']}`\n"
3751
+ response += f" 🤖 Source: {ctx['agent_source']}\n"
3752
+ response += f" 📅 Created: {formatted_date}\n"
3753
+ response += f" 📝 Summary: {ctx['summary'][:150]}{'...' if len(ctx['summary']) > 150 else ''}\n"
3754
+
3755
+ if ctx.get('tags'):
3756
+ response += f" 🏷️ Tags: {', '.join(ctx['tags'])}\n"
3757
+
3758
+ response += "\n"
3759
+
3760
+ response += f"**Actions:**\n"
3761
+ response += f"• `context(action='retrieve', context_id='...')` - Get full context\n"
3762
+ response += f"• Refine search with different keywords\n"
3763
+ response += f"• Use tags or agent filters for better results"
3764
+
3765
+ return [TextContent(type="text", text=response)]
3766
+
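
Search accepts the same optional filters as list plus a required query. A minimal illustrative argument dict, with assumed example values:

    search_args = {
        "action": "search",
        "query": "authentication",   # required, non-empty
        "limit": 10,                  # 1-100, same bound as list
        "tags": "api,security",       # optional comma-separated filter
        "agent_source": "cursor",     # optional source filter
    }
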
3767
+ # ===== UPDATE ACTION =====
3768
+ elif action == "update":
3769
+ if not context_id or not context_id.strip():
3770
+ return [TextContent(type="text", text="❌ Error: context_id is required for update action")]
3771
+
3772
+ # Check that at least one field is being updated
3773
+ if not any([title, summary, content, tags is not None, metadata is not None]):
3774
+ return [TextContent(
3775
+ type="text",
3776
+ text="❌ Error: At least one field must be provided for update (title, summary, content, tags, or metadata)"
3777
+ )]
3778
+
3779
+ # Validate fields if provided
3780
+ if title is not None and (not title.strip() or len(title) > 200):
3781
+ return [TextContent(type="text", text="❌ Error: Title must be 1-200 characters")]
3782
+
3783
+ if summary is not None and (len(summary) < 10 or len(summary) > 1000):
3784
+ return [TextContent(type="text", text="❌ Error: Summary must be 10-1000 characters")]
3785
+
3786
+ if content is not None and len(content) < 50:
3787
+ return [TextContent(type="text", text="❌ Error: Content must be at least 50 characters")]
3788
+
3789
+ if tags is not None and len(tags) > 10:
3790
+ return [TextContent(type="text", text="❌ Error: Maximum 10 tags allowed")]
3791
+
3792
+ result = await client.update_context(
3793
+ context_id=context_id.strip(),
3794
+ title=title.strip() if title else None,
3795
+ summary=summary.strip() if summary else None,
3796
+ content=content,
3797
+ tags=tags,
3798
+ metadata=metadata
3799
+ )
3800
+
3801
+ if not result:
3802
+ return [TextContent(
3803
+ type="text",
3804
+ text=f"❌ Error: Context with ID `{context_id}` not found"
3805
+ )]
3806
+
3807
+ # List updated fields
3808
+ updated_fields = []
3809
+ if title is not None:
3810
+ updated_fields.append("title")
3811
+ if summary is not None:
3812
+ updated_fields.append("summary")
3813
+ if content is not None:
3814
+ updated_fields.append("content")
3815
+ if tags is not None:
3816
+ updated_fields.append("tags")
3817
+ if metadata is not None:
3818
+ updated_fields.append("metadata")
3819
+
3820
+ response = f"✅ **Context Updated Successfully!**\n\n"
3821
+ response += f"🆔 **Context ID:** `{context_id}`\n"
3822
+ response += f"📝 **Title:** {result['title']}\n"
3823
+ response += f"🔄 **Updated Fields:** {', '.join(updated_fields)}\n"
3824
+ response += f"🤖 **Source Agent:** {result['agent_source']}\n\n"
3825
+
3826
+ response += f"**Current Status:**\n"
3827
+ response += f"• **Tags:** {', '.join(result['tags']) if result.get('tags') else 'None'}\n"
3828
+ response += f"• **Content Length:** {len(result['content']):,} characters\n\n"
3829
+
3830
+ response += f"Use `context(action='retrieve', context_id='{context_id}')` to see the full updated context."
3831
+
3832
+ return [TextContent(type="text", text=response)]
3833
+
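
Update is partial: only the supplied fields are validated and changed, and at least one must be present. An illustrative argument dict (values are made up):

    update_args = {
        "action": "update",
        "context_id": "550e8400-e29b-41d4-a716-446655440000",
        "title": "API Design (revised)",       # 1-200 characters if supplied
        "tags": ["api", "design", "revised"],  # at most 10 tags if supplied
        # summary (10-1000 chars), content (>= 50 chars) and metadata may also be sent
    }
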
3834
+ # ===== DELETE ACTION =====
3835
+ elif action == "delete":
3836
+ if not context_id or not context_id.strip():
3837
+ return [TextContent(type="text", text="❌ Error: context_id is required for delete action")]
3838
+
3839
+ success = await client.delete_context(context_id.strip())
3840
+
3841
+ if success:
3842
+ return [TextContent(
3843
+ type="text",
3844
+ text=f"✅ **Context Deleted Successfully!**\n\n"
3845
+ f"🆔 **Context ID:** `{context_id}`\n\n"
3846
+ f"The context has been permanently removed from your account.\n"
3847
+ f"This action cannot be undone.\n\n"
3848
+ f"Use `context(action='list')` to see your remaining contexts."
3849
+ )]
3850
+ else:
3851
+ return [TextContent(
3852
+ type="text",
3853
+ text=f"❌ **Context Not Found**\n\n"
3854
+ f"Context ID `{context_id}` was not found or has already been deleted.\n\n"
3855
+ f"Use `context(action='list')` to see your available contexts."
3856
+ )]
3857
+
3858
+ except APIError as e:
3859
+ logger.error(f"API Error in context ({action}): {e}")
3860
+ return [TextContent(type="text", text=f"❌ API Error: {str(e)}")]
3861
+ except Exception as e:
3862
+ logger.error(f"Error in context ({action}): {e}")
3863
+ return [TextContent(type="text", text=f"❌ Error in {action} operation: {str(e)}")]
3864
+
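
Read together, the branches above define one required-parameter set per action, and they replace the standalone tools that are commented out below. The snippet restates those requirements as data; it is a reading aid only and does not ship with the package:

    REQUIRED_BY_ACTION = {
        "save": ["title", "summary", "content", "agent_source"],
        "list": [],                    # limit/offset/tags/agent_source are all optional
        "retrieve": ["context_id"],
        "search": ["query"],
        "update": ["context_id"],      # plus at least one field to change
        "delete": ["context_id"],
    }

    def missing_parameters(arguments: dict) -> list:
        # Returns the required parameters absent from a context() argument dict.
        action = arguments.get("action")
        return [name for name in REQUIRED_BY_ACTION.get(action, []) if not arguments.get(name)]

    assert missing_parameters({"action": "retrieve"}) == ["context_id"]
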
3865
+ # DEPRECATED: Individual context tools below - use context() with action parameter instead
3866
+
3867
+ # @mcp.tool()
3868
+ # async def save_context(
3869
+ # title: str,
3870
+ # summary: str,
3871
+ # content: str,
3872
+ # agent_source: str,
3873
+ # tags: Optional[List[str]] = None,
3874
+ # metadata: Optional[dict] = None,
3875
+ # nia_references: Optional[dict] = None,
3876
+ # edited_files: Optional[List[dict]] = None
3877
+ # ) -> List[TextContent]:
2771
3878
  """
2772
3879
  Save a conversation context for cross-agent sharing.
2773
3880
 
@@ -2849,13 +3956,14 @@ async def save_context(
2849
3956
  logger.error(f"Error saving context: {e}")
2850
3957
  return [TextContent(type="text", text=f"❌ Error saving context: {str(e)}")]
2851
3958
 
2852
- @mcp.tool()
2853
- async def list_contexts(
2854
- limit: int = 20,
2855
- offset: int = 0,
2856
- tags: Optional[str] = None,
2857
- agent_source: Optional[str] = None
2858
- ) -> List[TextContent]:
3959
+ # DEPRECATED: Use context(action="list") instead
3960
+ # @mcp.tool()
3961
+ # async def list_contexts(
3962
+ # limit: int = 20,
3963
+ # offset: int = 0,
3964
+ # tags: Optional[str] = None,
3965
+ # agent_source: Optional[str] = None
3966
+ # ) -> List[TextContent]:
2859
3967
  """
2860
3968
  List saved conversation contexts with pagination and filtering.
2861
3969
 
@@ -2946,8 +4054,9 @@ async def list_contexts(
2946
4054
  logger.error(f"Error listing contexts: {e}")
2947
4055
  return [TextContent(type="text", text=f"❌ Error listing contexts: {str(e)}")]
2948
4056
 
2949
- @mcp.tool()
2950
- async def retrieve_context(context_id: str) -> List[TextContent]:
4057
+ # DEPRECATED: Use context(action="retrieve") instead
4058
+ # @mcp.tool()
4059
+ # async def retrieve_context(context_id: str) -> List[TextContent]:
2951
4060
  """
2952
4061
  Retrieve a specific conversation context by ID.
2953
4062
 
@@ -3123,13 +4232,14 @@ async def retrieve_context(context_id: str) -> List[TextContent]:
3123
4232
  logger.error(f"Error retrieving context: {e}")
3124
4233
  return [TextContent(type="text", text=f"❌ Error retrieving context: {str(e)}")]
3125
4234
 
3126
- @mcp.tool()
3127
- async def search_contexts(
3128
- query: str,
3129
- limit: int = 20,
3130
- tags: Optional[str] = None,
3131
- agent_source: Optional[str] = None
3132
- ) -> List[TextContent]:
4235
+ # DEPRECATED: Use context(action="search") instead
4236
+ # @mcp.tool()
4237
+ # async def search_contexts(
4238
+ # query: str,
4239
+ # limit: int = 20,
4240
+ # tags: Optional[str] = None,
4241
+ # agent_source: Optional[str] = None
4242
+ # ) -> List[TextContent]:
3133
4243
  """
3134
4244
  Search conversation contexts by content, title, or summary.
3135
4245
 
@@ -3220,15 +4330,16 @@ async def search_contexts(
3220
4330
  logger.error(f"Error searching contexts: {e}")
3221
4331
  return [TextContent(type="text", text=f"❌ Error searching contexts: {str(e)}")]
3222
4332
 
3223
- @mcp.tool()
3224
- async def update_context(
3225
- context_id: str,
3226
- title: Optional[str] = None,
3227
- summary: Optional[str] = None,
3228
- content: Optional[str] = None,
3229
- tags: Optional[List[str]] = None,
3230
- metadata: Optional[dict] = None
3231
- ) -> List[TextContent]:
4333
+ # DEPRECATED: Use context(action="update") instead
4334
+ # @mcp.tool()
4335
+ # async def update_context(
4336
+ # context_id: str,
4337
+ # title: Optional[str] = None,
4338
+ # summary: Optional[str] = None,
4339
+ # content: Optional[str] = None,
4340
+ # tags: Optional[List[str]] = None,
4341
+ # metadata: Optional[dict] = None
4342
+ # ) -> List[TextContent]:
3232
4343
  """
3233
4344
  Update an existing conversation context.
3234
4345
 
@@ -3330,8 +4441,9 @@ async def update_context(
3330
4441
  logger.error(f"Error updating context: {e}")
3331
4442
  return [TextContent(type="text", text=f"❌ Error updating context: {str(e)}")]
3332
4443
 
3333
- @mcp.tool()
3334
- async def delete_context(context_id: str) -> List[TextContent]:
4444
+ # DEPRECATED: Use context(action="delete") instead
4445
+ # @mcp.tool()
4446
+ # async def delete_context(context_id: str) -> List[TextContent]:
3335
4447
  """
3336
4448
  Delete a conversation context.
3337
4449