nia-mcp-server 1.0.1__tar.gz → 1.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nia-mcp-server might be problematic.
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/PKG-INFO +1 -2
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/pyproject.toml +1 -2
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/src/nia_mcp_server/__init__.py +1 -1
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/src/nia_mcp_server/api_client.py +63 -2
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/src/nia_mcp_server/server.py +198 -296
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/.gitignore +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/ARCHITECTURE.md +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/LICENSE +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/README.md +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.4}/src/nia_mcp_server/__main__.py +0 -0
--- nia_mcp_server-1.0.1/PKG-INFO
+++ nia_mcp_server-1.0.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nia-mcp-server
-Version: 1.0.1
+Version: 1.0.4
 Summary: NIA Knowledge Agent - MCP server for intelligent codebase search
 Project-URL: Homepage, https://trynia.ai
 Project-URL: Documentation, https://docs.trynia.ai
@@ -20,7 +20,6 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.8
-Requires-Dist: exa-py>=1.0.8
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: mcp>=0.1.0
 Requires-Dist: pydantic>=2.0.0
--- nia_mcp_server-1.0.1/pyproject.toml
+++ nia_mcp_server-1.0.4/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "nia-mcp-server"
-version = "1.0.1"
+version = "1.0.4"
 description = "NIA Knowledge Agent - MCP server for intelligent codebase search"
 readme = "README.md"
 requires-python = ">=3.8"
@@ -30,7 +30,6 @@ dependencies = [
     "httpx>=0.24.0",
     "pydantic>=2.0.0",
     "python-dotenv>=1.0.0",
-    "exa-py>=1.0.8",
 ]
 
 [project.urls]
--- nia_mcp_server-1.0.1/src/nia_mcp_server/api_client.py
+++ nia_mcp_server-1.0.4/src/nia_mcp_server/api_client.py
@@ -31,7 +31,7 @@ class NIAApiClient:
                 "User-Agent": "nia-mcp-server/1.0.0",
                 "Content-Type": "application/json"
             },
-            timeout=…
+            timeout=720.0  # 12 minute timeout for deep research operations
         )
 
     async def close(self):
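For orientation, the constructor this hunk touches presumably wraps httpx.AsyncClient roughly as sketched below; only the headers and the new timeout value come from the diff, and the surrounding call shape is an assumption.

    import httpx

    # Sketch of the client construction implied by the hunk above; everything
    # outside the headers and the 720.0 timeout shown in the diff is assumed.
    client = httpx.AsyncClient(
        headers={
            "User-Agent": "nia-mcp-server/1.0.0",
            "Content-Type": "application/json",
        },
        timeout=720.0,  # 12 minutes, so long deep-research calls are not cut off early
    )

The old (elided) timeout was evidently shorter; raising it client-wide is the simplest way to keep slow /v2/deep-research requests from failing, at the cost of slower failure detection on every other endpoint.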
@@ -474,4 +474,65 @@ class NIAApiClient:
         except httpx.HTTPStatusError as e:
             raise self._handle_api_error(e)
         except Exception as e:
-            raise APIError(f"Query failed: {str(e)}")
+            raise APIError(f"Query failed: {str(e)}")
+
+    async def web_search(
+        self,
+        query: str,
+        num_results: int = 5,
+        category: Optional[str] = None,
+        days_back: Optional[int] = None,
+        find_similar_to: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Perform AI-powered web search."""
+        try:
+            payload = {
+                "query": query,
+                "num_results": min(num_results, 10),
+            }
+
+            # Add optional parameters
+            if category:
+                payload["category"] = category
+            if days_back:
+                payload["days_back"] = days_back
+            if find_similar_to:
+                payload["find_similar_to"] = find_similar_to
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/web-search",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Web search failed: {str(e)}")
+
+    async def deep_research(
+        self,
+        query: str,
+        output_format: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Perform deep research using AI agent."""
+        try:
+            payload = {
+                "query": query,
+            }
+
+            if output_format:
+                payload["output_format"] = output_format
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/deep-research",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Deep research failed: {str(e)}")
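A hypothetical usage sketch of the two new client methods follows; the NIAApiClient constructor arguments are an assumption, since only web_search and deep_research appear in this diff.

    import asyncio
    from nia_mcp_server.api_client import NIAApiClient

    async def demo() -> None:
        client = NIAApiClient()  # constructor signature assumed, not shown in the diff
        try:
            hits = await client.web_search("retrieval-augmented generation libraries", num_results=5)
            report = await client.deep_research("Compare pgvector and Qdrant for vector search", output_format="json")
            # total_results and data are the keys the server code below reads from these dicts
            print(hits.get("total_results"), report.get("data"))
        finally:
            await client.close()

    asyncio.run(demo())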
--- nia_mcp_server-1.0.1/src/nia_mcp_server/server.py
+++ nia_mcp_server-1.0.4/src/nia_mcp_server/server.py
@@ -13,6 +13,7 @@ from mcp.server.fastmcp import FastMCP
 from mcp.types import TextContent, Resource
 from .api_client import NIAApiClient, APIError
 from dotenv import load_dotenv
+import json
 
 # Load .env from parent directory (nia-app/.env)
 from pathlib import Path
@@ -150,7 +151,7 @@ async def search_codebase(
     Search indexed repositories using natural language.
 
     Args:
-        query: Natural language search query
+        query: Natural language search query. Don't just use keywords or an unstructured query; ask a comprehensive question to get the best results possible.
         repositories: List of repositories to search (owner/repo format). If not specified, searches all indexed repos.
         include_sources: Whether to include source code in results
 
@@ -252,11 +253,11 @@ async def search_documentation(
     include_sources: bool = True
 ) -> List[TextContent]:
     """
-    Search indexed documentation using natural language.
+    Search indexed documentation using natural language.
 
     Args:
-        query: Natural language search query
-        sources: List of documentation source IDs to search.
+        query: Natural language search query. Don't just use keywords or an unstructured query; ask a comprehensive question to get the best results possible.
+        sources: List of documentation source IDs to search. Use it based on the user's query.
         include_sources: Whether to include source references in results
 
     Returns:
@@ -755,215 +756,132 @@ async def nia_web_search(
         Search results with actionable next steps
     """
     try:
-        …
-        api_key = os.getenv("EXA_API_KEY")
-        if not api_key:
-            return [TextContent(
-                type="text",
-                text="❌ NIA Web Search unavailable.\n\n"
-                     "This feature requires additional configuration. "
-                     "Please contact support for access to advanced search features."
-            )]
-
-        # Import client
-        try:
-            from exa_py import Exa
-        except ImportError:
-            return [TextContent(
-                type="text",
-                text="❌ NIA Web Search unavailable. Please update the NIA MCP server."
-            )]
-
-        # Initialize client
-        client = Exa(api_key)
-
-        # Limit results to reasonable number
-        num_results = min(num_results, 10)
+        client = await ensure_api_client()
 
         logger.info(f"Searching content for query: {query}")
 
-        # …
-        …
-        # Add category filter if specified
-        if category:
-            # Map user-friendly categories to Exa categories
-            category_map = {
-                "github": "github",
-                "company": "company",
-                "research": "research paper",
-                "news": "news",
-                "tweet": "tweet",
-                "pdf": "pdf",
-                "blog": "personal site"
-            }
-            if category.lower() in category_map:
-                search_params["category"] = category_map[category.lower()]
-
-        # Add date filter for trending content
-        if days_back:
-            from datetime import datetime, timedelta
-            start_date = (datetime.now() - timedelta(days=days_back)).strftime("%Y-%m-%d")
-            search_params["start_published_date"] = start_date
-
-        # Use similarity search if URL provided
-        if find_similar_to:
-            results = client.find_similar_and_contents(
-                find_similar_to,
-                **search_params
-            )
-        else:
-            results = client.search_and_contents(
-                query,
-                **search_params
-            )
+        # Use the API client method instead of direct HTTP call
+        result = await client.web_search(
+            query=query,
+            num_results=num_results,
+            category=category,
+            days_back=days_back,
+            find_similar_to=find_similar_to
+        )
 
-        # …
-        github_repos = []
-        documentation = []
-        other_content = []
-
-        for result in results.results:
-            url = result.url
-
-            # Categorize based on URL patterns
-            if "github.com" in url and "/tree/" not in url and "/blob/" not in url:
-                # It's a GitHub repo (not a specific file)
-                # Extract owner/repo from URL
-                try:
-                    parsed = urlparse(url)
-                    # Ensure we have a valid GitHub URL
-                    if parsed.hostname == "github.com":
-                        # Remove leading/trailing slashes and split the path
-                        path_parts = parsed.path.strip("/").split("/")
-                        # Verify we have at least owner and repo in the path
-                        if len(path_parts) >= 2 and path_parts[0] and path_parts[1]:
-                            # Extract only the owner and repo, ignoring any additional path components
-                            owner_repo = f"{path_parts[0]}/{path_parts[1]}"
-                            github_repos.append({
-                                "url": url,
-                                "owner_repo": owner_repo,
-                                "title": result.title or owner_repo,
-                                "summary": result.text[:200] if result.text else "",
-                                "highlights": result.highlights[:2] if result.highlights else [],
-                                "published_date": getattr(result, 'published_date', None)
-                            })
-                except:
-                    pass
-            elif any(doc_pattern in url for doc_pattern in ["docs.", "documentation", ".readthedocs.", "/docs/"]):
-                # It's documentation
-                documentation.append({
-                    "url": url,
-                    "title": result.title or "Documentation",
-                    "summary": result.text[:200] if result.text else "",
-                    "highlights": result.highlights[:2] if result.highlights else []
-                })
-            else:
-                # Other content
-                other_content.append({
-                    "url": url,
-                    "title": result.title or "Content",
-                    "summary": result.text[:200] if result.text else ""
-                })
+        # Extract results
+        github_repos = result.get("github_repos", [])
+        documentation = result.get("documentation", [])
+        other_content = result.get("other_content", [])
 
         # Format response to naturally guide next actions
-        …
+        response_text = f"## 🔍 NIA Web Search Results for: \"{query}\"\n\n"
 
         if days_back:
-            …
+            response_text += f"*Showing results from the last {days_back} days*\n\n"
 
         if find_similar_to:
-            …
+            response_text += f"*Finding content similar to: {find_similar_to}*\n\n"
 
         # GitHub Repositories Section
         if github_repos:
-            …
+            response_text += f"### 📦 GitHub Repositories ({len(github_repos)} found)\n\n"
 
             for i, repo in enumerate(github_repos[:num_results], 1):
-                …
+                response_text += f"**{i}. {repo['title']}**\n"
+                response_text += f"   📍 `{repo['url']}`\n"
                 if repo.get('published_date'):
-                    …
+                    response_text += f"   📅 Updated: {repo['published_date']}\n"
                 if repo['summary']:
-                    …
+                    response_text += f"   📝 {repo['summary']}...\n"
                 if repo['highlights']:
-                    …
+                    response_text += f"   ✨ Key features: {', '.join(repo['highlights'])}\n"
+                response_text += "\n"
 
             # Be more aggressive based on query specificity
             if len(github_repos) == 1 or any(specific_word in query.lower() for specific_word in ["specific", "exact", "particular", "find me", "looking for"]):
-                …
+                response_text += "**🚀 RECOMMENDED ACTION - Index this repository with NIA:**\n"
+                response_text += f"```\nIndex {github_repos[0]['owner_repo']}\n```\n"
+                response_text += "✨ This will enable AI-powered code search, understanding, and analysis!\n\n"
             else:
-                …
+                response_text += "**🚀 Make these repositories searchable with NIA's AI:**\n"
+                response_text += f"- **Quick start:** Say \"Index {github_repos[0]['owner_repo']}\"\n"
+                response_text += "- **Index multiple:** Say \"Index all repositories\"\n"
+                response_text += "- **Benefits:** AI-powered code search, architecture understanding, implementation details\n\n"
 
         # Documentation Section
         if documentation:
-            …
+            response_text += f"### 📚 Documentation ({len(documentation)} found)\n\n"
 
             for i, doc in enumerate(documentation[:num_results], 1):
-                …
+                response_text += f"**{i}. {doc['title']}**\n"
+                response_text += f"   📍 `{doc['url']}`\n"
                 if doc['summary']:
-                    …
+                    response_text += f"   📝 {doc['summary']}...\n"
                 if doc.get('highlights'):
-                    …
+                    response_text += f"   ✨ Key topics: {', '.join(doc['highlights'])}\n"
+                response_text += "\n"
 
             # Be more aggressive for documentation too
             if len(documentation) == 1 or any(specific_word in query.lower() for specific_word in ["docs", "documentation", "guide", "tutorial", "reference"]):
-                …
+                response_text += "**📖 RECOMMENDED ACTION - Index this documentation with NIA:**\n"
+                response_text += f"```\nIndex documentation {documentation[0]['url']}\n```\n"
+                response_text += "✨ NIA will make this fully searchable with AI-powered Q&A!\n\n"
            else:
-                …
+                response_text += "**📖 Make this documentation AI-searchable with NIA:**\n"
+                response_text += f"- **Quick start:** Say \"Index documentation {documentation[0]['url']}\"\n"
+                response_text += "- **Index all:** Say \"Index all documentation\"\n"
+                response_text += "- **Benefits:** Instant answers, smart search, code examples extraction\n\n"
 
         # Other Content Section
         if other_content and not github_repos and not documentation:
-            …
+            response_text += f"### 🌐 Other Content ({len(other_content)} found)\n\n"
 
             for i, content in enumerate(other_content[:num_results], 1):
-                …
+                response_text += f"**{i}. {content['title']}**\n"
+                response_text += f"   📍 `{content['url']}`\n"
                 if content['summary']:
-                    …
+                    response_text += f"   📝 {content['summary']}...\n"
+                response_text += "\n"
 
         # No results found
         if not github_repos and not documentation and not other_content:
-            …
+            response_text = f"No results found for '{query}'. Try:\n"
+            response_text += "- Using different keywords\n"
+            response_text += "- Being more specific (e.g., 'Python RAG implementation')\n"
+            response_text += "- Including technology names (e.g., 'LangChain', 'TypeScript')\n"
 
         # Add prominent call-to-action if we found indexable content
         if github_repos or documentation:
-            …
+            response_text += "\n## 🎯 **Ready to unlock NIA's AI capabilities?**\n"
+            response_text += "The repositories and documentation above can be indexed for:\n"
+            response_text += "- 🤖 AI-powered code understanding and search\n"
+            response_text += "- 💡 Instant answers to technical questions\n"
+            response_text += "- 🔍 Deep architectural insights\n"
+            response_text += "- 📚 Smart documentation Q&A\n\n"
+            response_text += "**Just copy and paste the index commands above!**\n"
 
         # Add search metadata
-        …
+        response_text += f"\n---\n"
+        response_text += f"*Searched {result.get('total_results', 0)} sources using NIA Web Search*"
 
-        return [TextContent(type="text", text=…
+        return [TextContent(type="text", text=response_text)]
 
+    except APIError as e:
+        logger.error(f"API Error in web search: {e}")
+        if e.status_code == 403 or "free tier limit" in str(e).lower() or "free api requests" in str(e).lower():
+            if e.detail and "25 free API requests" in e.detail:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited API access."
+                )]
+            else:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
+                )]
+        else:
+            return [TextContent(type="text", text=f"❌ {str(e)}")]
     except Exception as e:
         logger.error(f"Error in NIA web search: {str(e)}")
         return [TextContent(
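The new except APIError branch reads e.status_code and e.detail; a minimal sketch of an exception shape that would satisfy it follows. The real class lives in api_client.py and may carry more, so this is an assumption for illustration only.

    from typing import Optional

    # Assumed shape only: status_code and detail are the attributes the new
    # handler demonstrably uses; the actual APIError may differ.
    class APIError(Exception):
        def __init__(self, message: str, status_code: Optional[int] = None,
                     detail: Optional[str] = None):
            super().__init__(message)
            self.status_code = status_code
            self.detail = detail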
@@ -981,7 +899,7 @@ async def nia_deep_research_agent(
 ) -> List[TextContent]:
     """
     Perform deep, multi-step research on a topic using advanced AI research capabilities.
-    Best for complex questions that need comprehensive analysis.
+    Best for complex questions that need comprehensive analysis. Don't just use keywords or an unstructured query; ask a comprehensive question to get the best results possible.
 
     USE THIS TOOL WHEN:
     - Comparing multiple options ("compare X vs Y vs Z")
@@ -1011,154 +929,138 @@ async def nia_deep_research_agent(
         Comprehensive research results with citations
     """
     try:
-        …
-            return [TextContent(
-                type="text",
-                text="❌ Deep research unavailable. This advanced feature requires additional configuration."
-            )]
+        client = await ensure_api_client()
+
+        logger.info(f"Starting deep research for: {query}")
 
-        # …
+        # Use the API client method with proper timeout handling
         try:
-            …
+            result = await asyncio.wait_for(
+                client.deep_research(query=query, output_format=output_format),
+                timeout=720.0  # 12 minutes to allow for longer research tasks
+            )
+        except asyncio.TimeoutError:
+            logger.error(f"Deep research timed out after 12 minutes for query: {query}")
             return [TextContent(
                 type="text",
-                text="❌ Research…
+                text="❌ Research timed out. The query may be too complex. Try:\n"
+                     "- Breaking it into smaller questions\n"
+                     "- Using more specific keywords\n"
+                     "- Trying the nia_web_search tool for simpler queries"
             )]
 
-        # …
-        …
-        logger.info(f"Starting deep research for: {query}")
+        # Format the research results
+        response_text = f"## 🔬 NIA Deep Research Agent Results\n\n"
+        response_text += f"**Query:** {query}\n\n"
 
-        …
-        # Let the AI infer the schema based on the query
-        task = client.research.create_task(
-            instructions=query,
-            infer_schema=True,
-            model="exa-research-pro"  # Use the pro model
-        )
+        if result.get("data"):
+            response_text += "### 📊 Research Findings:\n\n"
 
-        …
+            # Pretty print the JSON data
 
-        …
-            task.id,
-            poll_interval=3,
-            max_wait_time=120
-        )
+            formatted_data = json.dumps(result["data"], indent=2)
+            response_text += f"```json\n{formatted_data}\n```\n\n"
 
-        …
-        )
+            # Add citations if available
+            if result.get("citations"):
+                response_text += "### 📚 Sources & Citations:\n\n"
+                citation_num = 1
+                for field, citations in result["citations"].items():
+                    if citations:
+                        response_text += f"**{field}:**\n"
+                        for citation in citations[:3]:  # Limit to 3 citations per field
+                            response_text += f"{citation_num}. [{citation.get('title', 'Source')}]({citation.get('url', '#')})\n"
+                            if citation.get('snippet'):
+                                response_text += f"   > {citation['snippet'][:150]}...\n"
+                            citation_num += 1
+                        response_text += "\n"
 
-        …
-        response = f"## 🔬 NIA Deep Research Agent Results\n\n"
-        response += f"**Query:** {query}\n\n"
+            response_text += "### 💡 RECOMMENDED NEXT ACTIONS WITH NIA:\n\n"
 
-        …
-        if result.citations:
-            response += "### 📚 Sources & Citations:\n\n"
-            citation_num = 1
-            for field, citations in result.citations.items():
-                if citations:
-                    response += f"**{field}:**\n"
-                    for citation in citations[:3]:  # Limit to 3 citations per field
-                        response += f"{citation_num}. [{citation.get('title', 'Source')}]({citation.get('url', '#')})\n"
-                        if citation.get('snippet'):
-                            response += f"   > {citation['snippet'][:150]}...\n"
-                        citation_num += 1
-                    response += "\n"
-
-        response += "### 💡 RECOMMENDED NEXT ACTIONS WITH NIA:\n\n"
-
-        # Extract potential repos and docs from the research data
-        repos_found = []
-        docs_found = []
-
-        # Helper function to extract URLs from nested data structures
-        def extract_urls_from_data(data, urls_list=None):
-            if urls_list is None:
-                urls_list = []
-
-            if isinstance(data, dict):
-                for value in data.values():
-                    extract_urls_from_data(value, urls_list)
-            elif isinstance(data, list):
-                for item in data:
-                    extract_urls_from_data(item, urls_list)
-            elif isinstance(data, str):
-                # Check if this string is a URL
-                if data.startswith(('http://', 'https://')):
-                    urls_list.append(data)
-
-            return urls_list
-
-        # Extract all URLs from the data
-        all_urls = extract_urls_from_data(result.data)
-
-        # Filter for GitHub repos and documentation
-        import re
-        github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'
-
-        for url in all_urls:
-            # Check for GitHub repos
-            github_match = re.search(github_pattern, url)
-            if github_match and '/tree/' not in url and '/blob/' not in url:
-                repos_found.append(github_match.group(1))
-            # Check for documentation URLs
-            elif any(doc_indicator in url.lower() for doc_indicator in ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']):
-                docs_found.append(url)
-
-        # Remove duplicates and limit results
-        repos_found = list(set(repos_found))[:3]
-        docs_found = list(set(docs_found))[:3]
-
-        if repos_found:
-            response += "**🚀 DISCOVERED REPOSITORIES - Index with NIA for deep analysis:**\n"
-            for repo in repos_found:
-                response += f"```\nIndex {repo}\n```\n"
-            response += "✨ Enable AI-powered code search and architecture understanding!\n\n"
-
-        if docs_found:
-            response += "**📖 DISCOVERED DOCUMENTATION - Index with NIA for smart search:**\n"
-            for doc in docs_found[:2]:  # Limit to 2 for readability
-                response += f"```\nIndex documentation {doc}\n```\n"
-            response += "✨ Make documentation instantly searchable with AI Q&A!\n\n"
+            # Extract potential repos and docs from the research data
+            repos_found = []
+            docs_found = []
+
+            # Helper function to extract URLs from nested data structures
+            def extract_urls_from_data(data, urls_list=None):
+                if urls_list is None:
+                    urls_list = []
 
-        if …
-        …
+                if isinstance(data, dict):
+                    for value in data.values():
+                        extract_urls_from_data(value, urls_list)
+                elif isinstance(data, list):
+                    for item in data:
+                        extract_urls_from_data(item, urls_list)
+                elif isinstance(data, str):
+                    # Check if this string is a URL
+                    if data.startswith(('http://', 'https://')):
+                        urls_list.append(data)
 
-        …
-        else:
-            response += "No structured data returned. The research may need a more specific query."
+                return urls_list
+
+            # Extract all URLs from the data
+            all_urls = extract_urls_from_data(result["data"])
 
-        …
+            # Filter for GitHub repos and documentation
+            import re
+            github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'
 
-        …
+            for url in all_urls:
+                # Check for GitHub repos
+                github_match = re.search(github_pattern, url)
+                if github_match and '/tree/' not in url and '/blob/' not in url:
+                    repos_found.append(github_match.group(1))
+                # Check for documentation URLs
+                elif any(doc_indicator in url.lower() for doc_indicator in ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']):
+                    docs_found.append(url)
 
+            # Remove duplicates and limit results
+            repos_found = list(set(repos_found))[:3]
+            docs_found = list(set(docs_found))[:3]
+
+            if repos_found:
+                response_text += "**🚀 DISCOVERED REPOSITORIES - Index with NIA for deep analysis:**\n"
+                for repo in repos_found:
+                    response_text += f"```\nIndex {repo}\n```\n"
+                response_text += "✨ Enable AI-powered code search and architecture understanding!\n\n"
+
+            if docs_found:
+                response_text += "**📖 DISCOVERED DOCUMENTATION - Index with NIA for smart search:**\n"
+                for doc in docs_found[:2]:  # Limit to 2 for readability
+                    response_text += f"```\nIndex documentation {doc}\n```\n"
+                response_text += "✨ Make documentation instantly searchable with AI Q&A!\n\n"
+
+            if not repos_found and not docs_found:
+                response_text += "**🔍 Manual indexing options:**\n"
+                response_text += "- If you see any GitHub repos mentioned: Say \"Index [owner/repo]\"\n"
+                response_text += "- If you see any documentation sites: Say \"Index documentation [url]\"\n"
+                response_text += "- These will unlock NIA's powerful AI search capabilities!\n\n"
+
+            response_text += "**📊 Other actions:**\n"
+            response_text += "- Ask follow-up questions about the research\n"
+            response_text += "- Request a different analysis format\n"
+            response_text += "- Search for more specific information\n"
+        else:
+            response_text += "No structured data returned. The research may need a more specific query."
+
+        return [TextContent(type="text", text=response_text)]
+
+    except APIError as e:
+        logger.error(f"API Error in deep research: {e}")
+        if e.status_code == 403 or "free tier limit" in str(e).lower() or "free api requests" in str(e).lower():
+            if e.detail and "25 free API requests" in e.detail:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited API access."
+                )]
+            else:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
+                )]
+        else:
+            return [TextContent(type="text", text=f"❌ {str(e)}")]
     except Exception as e:
         logger.error(f"Error in deep research: {str(e)}")
         return [TextContent(
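As a quick sanity check on the URL filtering added above, here is a standalone sketch using the same pattern and bucket logic (the sample URLs are invented):

    import re

    github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'

    for url in [
        "https://github.com/psf/requests",                      # repo root: matched
        "https://github.com/psf/requests/blob/main/README.md",  # file view: skipped
        "https://requests.readthedocs.io/en/latest/",           # docs site: documentation bucket
    ]:
        match = re.search(github_pattern, url)
        if match and '/tree/' not in url and '/blob/' not in url:
            print("repo:", match.group(1))
        elif any(ind in url.lower() for ind in ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']):
            print("docs:", url)

This prints "repo: psf/requests" and "docs: https://requests.readthedocs.io/en/latest/", while the /blob/ URL falls through both branches, matching the behavior of the diffed code.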