nia-mcp-server 1.0.16__py3-none-any.whl → 1.0.18__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of nia-mcp-server has been flagged as potentially problematic; see the registry listing for details.
- nia_mcp_server/__init__.py +1 -1
- nia_mcp_server/api_client.py +75 -2
- nia_mcp_server/server.py +427 -14
- {nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/METADATA +1 -1
- {nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/RECORD +8 -8
- {nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/WHEEL +0 -0
- {nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/entry_points.txt +0 -0
- {nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/licenses/LICENSE +0 -0
nia_mcp_server/__init__.py
CHANGED
nia_mcp_server/api_client.py
CHANGED
```diff
@@ -176,7 +176,10 @@ class NIAApiClient:
                 # Regular repo URL - extract owner/repo
                 parts = clean_url.rstrip('/').split('/')
                 if len(parts) >= 2:
-                    repo_name = parts[-1]
+                    repo_name = parts[-1]
+                    # Remove .git suffix if present
+                    if repo_name.endswith('.git'):
+                        repo_name = repo_name[:-4]
                     repository_path = f"{parts[-2]}/{repo_name}"
                 else:
                     repository_path = repo_url
@@ -685,4 +688,74 @@ class NIAApiClient:
         except httpx.HTTPStatusError as e:
             raise self._handle_api_error(e)
         except Exception as e:
-            raise APIError(f"Failed to get source content: {str(e)}")
+            raise APIError(f"Failed to get source content: {str(e)}")
+
+    async def index_local_filesystem(
+        self,
+        directory_path: str,
+        inclusion_patterns: List[str] = None,
+        exclusion_patterns: List[str] = None,
+        max_file_size_mb: int = 50
+    ) -> Dict[str, Any]:
+        """Index a local filesystem directory."""
+        try:
+            payload = {
+                "directory_path": directory_path,
+                "inclusion_patterns": inclusion_patterns or [],
+                "exclusion_patterns": exclusion_patterns or [],
+                "max_file_size_mb": max_file_size_mb
+            }
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/local-filesystem",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Failed to index local filesystem: {str(e)}")
+
+    async def scan_local_filesystem(
+        self,
+        directory_path: str,
+        inclusion_patterns: List[str] = None,
+        exclusion_patterns: List[str] = None,
+        max_file_size_mb: int = 50
+    ) -> Dict[str, Any]:
+        """Scan a local filesystem directory to preview what would be indexed."""
+        try:
+            payload = {
+                "directory_path": directory_path,
+                "inclusion_patterns": inclusion_patterns or [],
+                "exclusion_patterns": exclusion_patterns or [],
+                "max_file_size_mb": max_file_size_mb
+            }
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/local-filesystem/scan",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Failed to scan local filesystem: {str(e)}")
+
+    async def check_local_filesystem_status(self, source_id: str) -> Dict[str, Any]:
+        """Check the indexing status of a local filesystem source."""
+        try:
+            response = await self.client.get(
+                f"{self.base_url}/v2/local-filesystem/{source_id}"
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Failed to check local filesystem status: {str(e)}")
```
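Taken together, the three new client methods wrap the `/v2/local-filesystem` endpoints. Below is a minimal sketch of how a caller might chain them, assuming an already-constructed `NIAApiClient` (its constructor is outside this diff); the response fields used here match the ones `server.py` reads further down.

```python
import asyncio

async def preview_then_index(client, path: str) -> dict:
    # Dry run first: preview what the indexer would pick up.
    scan = await client.scan_local_filesystem(
        directory_path=path,
        exclusion_patterns=["dir:node_modules", "ext:.log"],
    )
    print(f"{scan.get('total_files', 0):,} files, "
          f"{scan.get('total_size_mb', 0):.2f} MB would be indexed")

    # Kick off indexing, then poll the status endpoint until it settles.
    result = await client.index_local_filesystem(directory_path=path)
    source_id = result["data"]["source_id"]
    while True:
        status = await client.check_local_filesystem_status(source_id)
        if status.get("status") in ("completed", "failed", "error"):
            return status
        await asyncio.sleep(5)
```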
nia_mcp_server/server.py
CHANGED
```diff
@@ -225,6 +225,7 @@ async def search_codebase(
         # Stream the response using unified query
         response_parts = []
         sources_parts = []
+        follow_up_questions = []
 
         async for chunk in client.query_unified(
             messages=messages,
@@ -243,6 +244,10 @@ async def search_codebase(
                     if "sources" in data and data["sources"]:
                         logger.debug(f"Received sources data: {type(data['sources'])}, count: {len(data['sources'])}")
                         sources_parts.extend(data["sources"])
+
+                    if "follow_up_questions" in data and data["follow_up_questions"]:
+                        follow_up_questions = data["follow_up_questions"]
+                        logger.debug(f"Received {len(follow_up_questions)} follow-up questions")
 
             except json.JSONDecodeError as e:
                 logger.warning(f"Failed to parse JSON chunk: {chunk}, error: {e}")
@@ -294,6 +299,13 @@ async def search_codebase(
         response_text += "\n💡 **Need more details from a source?**\n\n"
         response_text += "If you need more information from the source links provided above, use the `read_source_content` tool from the available tools provided by Nia to get full context about that particular source.\n"
 
+        # Add follow-up questions if available
+        if follow_up_questions:
+            response_text += "\n\n## 🔍 Suggested Follow-up Questions\n\n"
+            for i, question in enumerate(follow_up_questions, 1):
+                response_text += f"{i}. {question}\n"
+            response_text += "\n*These questions are based on the search results and can help you explore deeper insights.*\n"
+
         return [TextContent(type="text", text=response_text)]
 
     except APIError as e:
```
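The handler above implies a streaming protocol in which each chunk is a JSON object that may carry `content`, `sources`, and now `follow_up_questions`. A hypothetical chunk and the handling it receives (only the key names come from the diff; the payload values are invented for illustration):

```python
import json

# Invented example payload; only the key names are taken from the handler above.
chunk = json.dumps({
    "content": "Authentication is handled in src/auth/middleware.py ...",
    "sources": [{"metadata": {"file_path": "src/auth/middleware.py"}}],
    "follow_up_questions": ["How are expired tokens refreshed?"],
})

data = json.loads(chunk)
follow_up_questions = data.get("follow_up_questions") or []
for i, question in enumerate(follow_up_questions, 1):
    print(f"{i}. {question}")  # rendered as a numbered list in the final reply
```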
```diff
@@ -367,6 +379,7 @@ async def search_documentation(
         # Stream the response using unified query
         response_parts = []
         sources_parts = []
+        follow_up_questions = []
 
         async for chunk in client.query_unified(
             messages=messages,
@@ -385,6 +398,10 @@ async def search_documentation(
                     if "sources" in data and data["sources"]:
                         logger.debug(f"Received doc sources data: {type(data['sources'])}, count: {len(data['sources'])}")
                         sources_parts.extend(data["sources"])
+
+                    if "follow_up_questions" in data and data["follow_up_questions"]:
+                        follow_up_questions = data["follow_up_questions"]
+                        logger.debug(f"Received {len(follow_up_questions)} follow-up questions for documentation")
 
             except json.JSONDecodeError as e:
                 logger.warning(f"Failed to parse JSON chunk in documentation search: {chunk}, error: {e}")
@@ -432,6 +449,13 @@ async def search_documentation(
         response_text += "\n💡 **Need more details from a source?**\n\n"
         response_text += "If you need more information from the source links provided above, use the `read_source_content` tool from the available tools provided by Nia to get full context about that particular source.\n"
 
+        # Add follow-up questions if available
+        if follow_up_questions:
+            response_text += "\n\n## 🔍 Suggested Follow-up Questions\n\n"
+            for i, question in enumerate(follow_up_questions, 1):
+                response_text += f"{i}. {question}\n"
+            response_text += "\n*These questions are based on the documentation and can help you explore related topics.*\n"
+
         return [TextContent(type="text", text=response_text)]
 
     except APIError as e:
```
```diff
@@ -497,20 +521,7 @@ async def list_repositories() -> List[TextContent]:
             if repo.get("status") == "completed":
                 lines.append(f"- **Usage:** `search_codebase(query, [\"{repo_name}\"])`")
 
-        #
-        lines.extend([
-            "\n---",
-            "\n## Usage Tips",
-            "- To search all repositories: `search_codebase(\"your query\")`",
-            "- To search specific repository: `search_codebase(\"your query\", [\"owner/repo\"])`"
-        ])
-
-        if has_folder_repos:
-            lines.extend([
-                "- For folder-indexed repositories: Use the EXACT repository path shown above",
-                "  Example: `search_codebase(\"query\", [\"owner/repo/tree/branch/folder\"])`"
-            ])
-
+        # Return without usage tips
         return [TextContent(type="text", text="\n".join(lines))]
 
     except APIError as e:
```
````diff
@@ -1622,6 +1633,408 @@ async def read_source_content(
             text=f"❌ Error reading source content: {str(e)}"
         )]
 
+@mcp.tool()
+async def index_local_filesystem(
+    directory_path: str,
+    inclusion_patterns: Optional[List[str]] = None,
+    exclusion_patterns: Optional[List[str]] = None,
+    max_file_size_mb: int = 50
+) -> List[TextContent]:
+    """
+    Index a local filesystem directory for intelligent search.
+
+    Args:
+        directory_path: Absolute path to the directory to index
+        inclusion_patterns: Optional list of patterns to include (e.g., ["ext:.py", "dir:src"])
+        exclusion_patterns: Optional list of patterns to exclude (e.g., ["dir:node_modules", "ext:.log"])
+        max_file_size_mb: Maximum file size in MB to process (default: 50)
+
+    Returns:
+        Status of the indexing operation
+
+    Important:
+        - Path must be absolute (e.g., /Users/username/projects/myproject)
+        - When indexing starts, use check_local_filesystem_status tool to monitor progress
+    """
+    try:
+        # Validate absolute path
+        if not os.path.isabs(directory_path):
+            return [TextContent(
+                type="text",
+                text=f"❌ Error: directory_path must be an absolute path. Got: {directory_path}\n\n"
+                     f"Example: /Users/username/projects/myproject"
+            )]
+
+        client = await ensure_api_client()
+
+        # Start indexing
+        logger.info(f"Starting to index local directory: {directory_path}")
+        result = await client.index_local_filesystem(
+            directory_path=directory_path,
+            inclusion_patterns=inclusion_patterns or [],
+            exclusion_patterns=exclusion_patterns or [],
+            max_file_size_mb=max_file_size_mb
+        )
+
+        if result.get("success"):
+            source_id = result["data"]["source_id"]
+            status_url = result["data"]["status_url"]
+
+            return [TextContent(
+                type="text",
+                text=(
+                    f"✅ Successfully started indexing local directory!\n\n"
+                    f"📁 **Directory:** `{directory_path}`\n"
+                    f"🆔 **Source ID:** `{source_id}`\n"
+                    f"📊 **Status:** Processing\n\n"
+                    f"**What happens next:**\n"
+                    f"• NIA is scanning and indexing your files in the background\n"
+                    f"• This process typically takes a few minutes depending on directory size\n"
+                    f"• Use `check_local_filesystem_status` with source ID `{source_id}` to monitor progress\n"
+                    f"• Once indexed, use `search_codebase` or `search_documentation` to search your files\n\n"
+                    f"📌 **Tip:** You can check the status at any time or visit [app.trynia.ai](https://app.trynia.ai) to monitor progress."
+                )
+            )]
+        else:
+            return [TextContent(
+                type="text",
+                text=f"❌ Failed to start indexing: {result.get('detail', 'Unknown error')}"
+            )]
+
+    except APIError as e:
+        logger.error(f"API error indexing local filesystem: {e}")
+        return [TextContent(
+            type="text",
+            text=f"❌ API Error: {str(e)}\n\nStatus Code: {e.status_code}\nDetails: {e.detail}"
+        )]
+    except Exception as e:
+        logger.error(f"Unexpected error indexing local filesystem: {e}")
+        return [TextContent(
+            type="text",
+            text=f"❌ Error: An unexpected error occurred while indexing the directory: {str(e)}"
+        )]
+
+@mcp.tool()
+async def scan_local_filesystem(
+    directory_path: str,
+    inclusion_patterns: Optional[List[str]] = None,
+    exclusion_patterns: Optional[List[str]] = None,
+    max_file_size_mb: int = 50
+) -> List[TextContent]:
+    """
+    Scan a local filesystem directory to preview what files would be indexed.
+
+    This tool helps you understand what files will be processed before actually indexing.
+
+    Args:
+        directory_path: Absolute path to the directory to scan
+        inclusion_patterns: Optional list of patterns to include (e.g., ["ext:.py", "dir:src"])
+        exclusion_patterns: Optional list of patterns to exclude (e.g., ["dir:node_modules", "ext:.log"])
+        max_file_size_mb: Maximum file size in MB to process (default: 50)
+
+    Returns:
+        Summary of files that would be indexed including count, size, and file types
+    """
+    try:
+        # Validate absolute path
+        if not os.path.isabs(directory_path):
+            return [TextContent(
+                type="text",
+                text=f"❌ Error: directory_path must be an absolute path. Got: {directory_path}\n\n"
+                     f"Example: /Users/username/projects/myproject"
+            )]
+
+        client = await ensure_api_client()
+
+        logger.info(f"Scanning local directory: {directory_path}")
+        result = await client.scan_local_filesystem(
+            directory_path=directory_path,
+            inclusion_patterns=inclusion_patterns or [],
+            exclusion_patterns=exclusion_patterns or [],
+            max_file_size_mb=max_file_size_mb
+        )
+
+        # Format the scan results
+        total_files = result.get("total_files", 0)
+        total_size_mb = result.get("total_size_mb", 0)
+        file_types = result.get("file_types", {})
+        files = result.get("files", [])
+        truncated = result.get("truncated", False)
+
+        response = f"📊 **Local Directory Scan Results**\n\n"
+        response += f"📁 **Directory:** `{directory_path}`\n"
+        response += f"📄 **Total Files:** {total_files:,}\n"
+        response += f"💾 **Total Size:** {total_size_mb:.2f} MB\n\n"
+
+        if file_types:
+            response += "**File Types:**\n"
+            # Sort by count descending
+            sorted_types = sorted(file_types.items(), key=lambda x: x[1], reverse=True)
+            for ext, count in sorted_types[:10]:  # Show top 10
+                response += f"• `{ext}`: {count:,} files\n"
+            if len(sorted_types) > 10:
+                response += f"• ... and {len(sorted_types) - 10} more types\n"
+            response += "\n"
+
+        if files:
+            response += f"**Largest Files (showing {min(len(files), 10)}):**\n"
+            for i, file_info in enumerate(files[:10]):
+                size_mb = file_info["size"] / (1024 * 1024)
+                response += f"{i+1}. `{file_info['path']}` ({size_mb:.2f} MB)\n"
+
+        if truncated:
+            response += f"\n*Note: Showing first 100 files out of {total_files:,} total*\n"
+
+        if inclusion_patterns:
+            response += f"\n**Inclusion Patterns:** {', '.join(f'`{p}`' for p in inclusion_patterns)}\n"
+        if exclusion_patterns:
+            response += f"**Exclusion Patterns:** {', '.join(f'`{p}`' for p in exclusion_patterns)}\n"
+
+        response += "\n💡 **Next Step:** Use `index_local_filesystem` to index these files."
+
+        return [TextContent(type="text", text=response)]
+
+    except APIError as e:
+        logger.error(f"API error scanning local filesystem: {e}")
+        return [TextContent(
+            type="text",
+            text=f"❌ API Error: {str(e)}\n\nStatus Code: {e.status_code}\nDetails: {e.detail}"
+        )]
+    except Exception as e:
+        logger.error(f"Unexpected error scanning local filesystem: {e}")
+        return [TextContent(
+            type="text",
+            text=f"❌ Error: An unexpected error occurred while scanning: {str(e)}"
+        )]
+
+@mcp.tool()
+async def check_local_filesystem_status(source_id: str) -> List[TextContent]:
+    """
+    Check the indexing status of a local filesystem source.
+
+    Args:
+        source_id: The source ID returned when indexing was started
+
+    Returns:
+        Current status of the local filesystem indexing
+    """
+    try:
+        client = await ensure_api_client()
+        status = await client.check_local_filesystem_status(source_id)
+
+        # Format status response
+        status_text = status.get("status", "unknown")
+        progress = status.get("progress", 0)
+        message = status.get("message", "")
+        error = status.get("error")
+        directory_path = status.get("directory_path", "Unknown")
+        page_count = status.get("page_count", 0)  # Number of files
+        chunk_count = status.get("chunk_count", 0)
+
+        # Status emoji
+        status_emoji = {
+            "pending": "⏳",
+            "processing": "🔄",
+            "completed": "✅",
+            "failed": "❌",
+            "error": "❌"
+        }.get(status_text, "❓")
+
+        response = f"{status_emoji} **Local Filesystem Status**\n\n"
+        response += f"🆔 **Source ID:** `{source_id}`\n"
+        response += f"📁 **Directory:** `{directory_path}`\n"
+        response += f"📊 **Status:** {status_text.capitalize()}\n"
+
+        if progress > 0:
+            response += f"📈 **Progress:** {progress}%\n"
+
+        if message:
+            response += f"💬 **Message:** {message}\n"
+
+        if status_text == "completed":
+            response += f"\n✨ **Indexing Complete!**\n"
+            response += f"• **Files Indexed:** {page_count:,}\n"
+            response += f"• **Chunks Created:** {chunk_count:,}\n"
+            response += f"\nYou can now search this directory using `search_codebase` or the unified search!"
+        elif status_text in ["failed", "error"]:
+            response += f"\n❌ **Indexing Failed**\n"
+            if error:
+                response += f"**Error:** {error}\n"
+            response += "\nPlease check your directory path and try again."
+        elif status_text == "processing":
+            response += f"\n🔄 Indexing is in progress...\n"
+            response += "Check back in a few moments or monitor at [app.trynia.ai](https://app.trynia.ai)"
+
+        return [TextContent(type="text", text=response)]
+
+    except APIError as e:
+        logger.error(f"API error checking local filesystem status: {e}")
+        if e.status_code == 404:
+            return [TextContent(
+                type="text",
+                text=f"❌ Source ID `{source_id}` not found. Please check the ID and try again."
+            )]
+        return [TextContent(
+            type="text",
+            text=f"❌ API Error: {str(e)}\n\nStatus Code: {e.status_code}\nDetails: {e.detail}"
+        )]
+    except Exception as e:
+        logger.error(f"Unexpected error checking local filesystem status: {e}")
+        return [TextContent(
+            type="text",
+            text=f"❌ Error: An unexpected error occurred: {str(e)}"
+        )]
+
+@mcp.tool()
+async def search_local_filesystem(
+    source_id: str,
+    query: str,
+    include_sources: bool = True
+) -> List[TextContent]:
+    """
+    Search an indexed local filesystem directory using its source ID.
+
+    To search local files:
+    1. First index a directory using `index_local_filesystem` - this will return a source_id
+    2. Use that source_id with this tool to search the indexed content
+
+    Args:
+        source_id: The source ID returned when the directory was indexed (required)
+        query: Your search query in natural language (required)
+        include_sources: Whether to include source code snippets in results (default: True)
+
+    Returns:
+        Search results with relevant file snippets and explanations
+
+    Example:
+        # After indexing returns source_id "abc123-def456"
+        search_local_filesystem(
+            source_id="abc123-def456",
+            query="configuration settings"
+        )
+
+    Note: To find your source IDs, use `list_documentation` and look for
+    sources with source_type="local_filesystem"
+    """
+    try:
+        # Validate inputs
+        if not source_id:
+            return [TextContent(
+                type="text",
+                text="❌ Error: 'source_id' parameter is required. Use the ID returned from index_local_filesystem."
+            )]
+
+        if not query:
+            return [TextContent(
+                type="text",
+                text="❌ Error: 'query' parameter is required"
+            )]
+
+        client = await ensure_api_client()
+
+        # Check if the source exists and is ready
+        logger.info(f"Checking status of source {source_id}")
+        try:
+            status = await client.get_data_source_status(source_id)
+            if not status:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ Source ID '{source_id}' not found. Please check the ID and try again."
+                )]
+
+            source_status = status.get("status", "unknown")
+            if source_status == "processing":
+                progress = status.get("progress", 0)
+                return [TextContent(
+                    type="text",
+                    text=f"⏳ This source is still being indexed ({progress}% complete).\n\n"
+                         f"Use `check_local_filesystem_status(\"{source_id}\")` to check progress."
+                )]
+            elif source_status == "failed":
+                error = status.get("error", "Unknown error")
+                return [TextContent(
+                    type="text",
+                    text=f"❌ This source failed to index.\n\nError: {error}"
+                )]
+            elif source_status != "completed":
+                return [TextContent(
+                    type="text",
+                    text=f"❌ Source is not ready for search. Status: {source_status}"
+                )]
+        except Exception as e:
+            logger.warning(f"Could not check source status: {e}")
+            # Continue anyway in case it's just a status check issue
+
+        # Perform the search
+        logger.info(f"Searching local filesystem source {source_id} with query: {query}")
+
+        # Use the unified query endpoint with data_sources parameter
+        result = client.query_unified(
+            messages=[{"role": "user", "content": query}],
+            data_sources=[source_id],
+            include_sources=include_sources,
+            stream=False
+        )
+
+        # Parse the response
+        response_text = ""
+        async for chunk in result:
+            data = json.loads(chunk)
+            if "content" in data:
+                response_text = data["content"]
+                sources = data.get("sources", [])
+                break
+
+        # Format the response nicely for local filesystem results
+        if response_text:
+            # Extract the local filesystem results section if present
+            if "**Local filesystem results" in response_text:
+                # Keep the original response
+                formatted_response = response_text
+            else:
+                # Create our own formatted response
+                formatted_response = f"🔍 **Search Results for Local Directory**\n"
+                formatted_response += f"🔎 Query: \"{query}\"\n\n"
+                formatted_response += response_text
+
+            # Add sources if available and requested
+            if include_sources and sources:
+                formatted_response += "\n\n**📄 Source Details:**\n"
+                for i, source in enumerate(sources[:5], 1):
+                    metadata = source.get("metadata", {})
+                    file_path = metadata.get("file_path", "Unknown file")
+                    formatted_response += f"\n{i}. `{file_path}`\n"
+
+                    # Add snippet of content
+                    content = source.get("content", "")
+                    if content:
+                        # Truncate to reasonable length
+                        lines = content.split('\n')[:10]
+                        snippet = '\n'.join(lines)
+                        if len(lines) > 10:
+                            snippet += "\n..."
+                        formatted_response += f"```\n{snippet}\n```\n"
+
+            return [TextContent(type="text", text=formatted_response)]
+        else:
+            return [TextContent(
+                type="text",
+                text=f"No results found for query: \"{query}\" in the indexed directory."
+            )]
+
+    except APIError as e:
+        logger.error(f"API error searching local filesystem: {e}")
+        return [TextContent(
+            type="text",
+            text=f"❌ API Error: {str(e)}\n\nStatus Code: {e.status_code}\nDetails: {e.detail}"
+        )]
+    except Exception as e:
+        logger.error(f"Unexpected error searching local filesystem: {e}")
+        return [TextContent(
+            type="text",
+            text=f"❌ Error: An unexpected error occurred: {str(e)}"
+        )]
+
 @mcp.tool()
 async def visualize_codebase(
     repository: str
````
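The four new tools compose into a scan → index → poll → search workflow. A hedged sketch of the calls an MCP host might issue (`call_tool` is a stand-in for whatever invocation API the host exposes; the tool names, argument names, and pattern syntax come from the signatures and docstrings above, while the source ID is the docstring's example value):

```python
async def demo(call_tool):
    project = "/Users/username/projects/myproject"

    # 1. Preview what would be indexed before committing.
    await call_tool("scan_local_filesystem", {
        "directory_path": project,
        "inclusion_patterns": ["ext:.py", "dir:src"],
        "exclusion_patterns": ["dir:node_modules"],
    })

    # 2. Index for real; the reply embeds a source ID.
    await call_tool("index_local_filesystem", {"directory_path": project})

    # 3. Poll until the status reply reports "completed".
    await call_tool("check_local_filesystem_status", {"source_id": "abc123-def456"})

    # 4. Search the indexed directory with a natural-language query.
    await call_tool("search_local_filesystem", {
        "source_id": "abc123-def456",
        "query": "configuration settings",
    })
```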
{nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/RECORD
CHANGED

```diff
@@ -1,19 +1,19 @@
-nia_mcp_server/__init__.py,sha256=
+nia_mcp_server/__init__.py,sha256=_QdlhBenOh3kbV3EalKuqZxEBxmPq3i71u8R03FMYSs,85
 nia_mcp_server/__main__.py,sha256=YQSpFtDeKp18r8mKr084cHnRFV4416_EKCu9FTM8_ik,394
-nia_mcp_server/api_client.py,sha256=
+nia_mcp_server/api_client.py,sha256=L9xf7rz6i2aPKKLfYOwj8Fs0_SYqJHHZgTki0y3UtcY,30415
 nia_mcp_server/cli.py,sha256=32VSPNIocXtDgVBDZNZsxvj3kytBn54_a1pIE84vOdY,1834
 nia_mcp_server/profiles.py,sha256=2DD8PFRr5Ij4IK4sPUz0mH8aKjkrEtkKLC1R0iki2bA,7221
 nia_mcp_server/project_init.py,sha256=T0-ziJhofL4L8APwnM43BLhxtlmOHaYH-V9PF2yXLw4,7138
 nia_mcp_server/rule_transformer.py,sha256=wCxoQ1Kl_rI9mUFnh9kG5iCXYU4QInrmFQOReZfAFVo,11000
-nia_mcp_server/server.py,sha256=
+nia_mcp_server/server.py,sha256=oGNcJKU87_5GlsoAGmr7Bqn6RWQb2Vy8onkJ0HeB6oQ,95601
 nia_mcp_server/setup.py,sha256=nJXVY8NHGtWROtoH8DW-3uOgyuPs4F9dW0cNhcbCLrM,5355
 nia_mcp_server/assets/rules/claude_rules.md,sha256=HNL5GJMUbFxSpNbIAJUQWqAywjMl4lf530I1in69aNY,7380
 nia_mcp_server/assets/rules/cursor_rules.md,sha256=hd6lhzNrK1ULQUYIEVeOnyKnuLKq4hmwZPbMqGUI1Lk,1720
 nia_mcp_server/assets/rules/nia_rules.md,sha256=l6sx000uqoczoHYqOPp4hnNgyfpnhvO9NyT0fVx5nU0,8059
 nia_mcp_server/assets/rules/vscode_rules.md,sha256=fqn4aJO_bhftaCGkVoquruQHf3EaREQJQWHXq6a4FOk,6967
 nia_mcp_server/assets/rules/windsurf_rules.md,sha256=PzU2as5gaiVsV6PAzg8T_-GR7VCyRQGMjAHcSzYF_ms,3354
-nia_mcp_server-1.0.
-nia_mcp_server-1.0.
-nia_mcp_server-1.0.
-nia_mcp_server-1.0.
-nia_mcp_server-1.0.
+nia_mcp_server-1.0.18.dist-info/METADATA,sha256=Ptsenk9rz-XEmqygQHZUEYAkDEZkhqhdR3isWhMx7GQ,1324
+nia_mcp_server-1.0.18.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+nia_mcp_server-1.0.18.dist-info/entry_points.txt,sha256=V74FQEp48pfWxPCl7B9mihtqvIJNVjCSbRfCz4ww77I,64
+nia_mcp_server-1.0.18.dist-info/licenses/LICENSE,sha256=IrdVKi3bsiB2MTLM26MltBRpwyNi-8P6Cy0EnmAN76A,1557
+nia_mcp_server-1.0.18.dist-info/RECORD,,
```

{nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/WHEEL
File without changes

{nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/entry_points.txt
File without changes

{nia_mcp_server-1.0.16.dist-info → nia_mcp_server-1.0.18.dist-info}/licenses/LICENSE
File without changes
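For reference, each RECORD row is `path,sha256=<digest>,<size>`, where the digest is the urlsafe base64 encoding of the raw SHA-256 hash with `=` padding stripped (standard wheel RECORD format). A small sketch that recomputes such a row (the file path is illustrative):

```python
import base64
import hashlib

def record_row(path: str) -> str:
    # Hash the file and format it the way wheel RECORD files do:
    # urlsafe base64 of the raw sha256 digest, '=' padding stripped.
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# e.g. record_row("nia_mcp_server/__init__.py")
# -> "nia_mcp_server/__init__.py,sha256=_QdlhBenOh...,85"
```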