nia-mcp-server 1.0.1__tar.gz → 1.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nia-mcp-server might be problematic.
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/PKG-INFO +1 -2
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/pyproject.toml +1 -2
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/src/nia_mcp_server/__init__.py +1 -1
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/src/nia_mcp_server/api_client.py +63 -2
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/src/nia_mcp_server/server.py +192 -291
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/.gitignore +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/ARCHITECTURE.md +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/LICENSE +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/README.md +0 -0
- {nia_mcp_server-1.0.1 → nia_mcp_server-1.0.3}/src/nia_mcp_server/__main__.py +0 -0
--- nia_mcp_server-1.0.1/PKG-INFO
+++ nia_mcp_server-1.0.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nia-mcp-server
-Version: 1.0.1
+Version: 1.0.3
 Summary: NIA Knowledge Agent - MCP server for intelligent codebase search
 Project-URL: Homepage, https://trynia.ai
 Project-URL: Documentation, https://docs.trynia.ai
@@ -20,7 +20,6 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.8
-Requires-Dist: exa-py>=1.0.8
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: mcp>=0.1.0
 Requires-Dist: pydantic>=2.0.0
--- nia_mcp_server-1.0.1/pyproject.toml
+++ nia_mcp_server-1.0.3/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "nia-mcp-server"
-version = "1.0.1"
+version = "1.0.3"
 description = "NIA Knowledge Agent - MCP server for intelligent codebase search"
 readme = "README.md"
 requires-python = ">=3.8"
@@ -30,7 +30,6 @@ dependencies = [
     "httpx>=0.24.0",
     "pydantic>=2.0.0",
     "python-dotenv>=1.0.0",
-    "exa-py>=1.0.8",
 ]
 
 [project.urls]
--- nia_mcp_server-1.0.1/src/nia_mcp_server/api_client.py
+++ nia_mcp_server-1.0.3/src/nia_mcp_server/api_client.py
@@ -31,7 +31,7 @@ class NIAApiClient:
                 "User-Agent": "nia-mcp-server/1.0.0",
                 "Content-Type": "application/json"
             },
-            timeout=
+            timeout=720.0 # 12 minute timeout for deep research operations
         )
 
     async def close(self):
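For orientation, here is a minimal sketch of the client construction this hunk touches. Only the User-Agent/Content-Type headers and the 720-second timeout come from the diff; the constructor shape, the auth header, and the base_url handling are assumptions.

```python
# Minimal sketch (assumption: everything except the two headers and the timeout shown in the diff).
import httpx

class NIAApiClient:
    def __init__(self, api_key: str, base_url: str) -> None:
        self.base_url = base_url.rstrip("/")
        self.client = httpx.AsyncClient(
            headers={
                "Authorization": f"Bearer {api_key}",  # assumption: auth header name
                "User-Agent": "nia-mcp-server/1.0.0",
                "Content-Type": "application/json",
            },
            timeout=720.0,  # 12 minute timeout so long-running deep research calls are not cut off
        )

    async def close(self) -> None:
        # Release the underlying connection pool
        await self.client.aclose()
```

Because the timeout is set on the shared AsyncClient, every request made through this client inherits the 12-minute budget.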
@@ -474,4 +474,65 @@
         except httpx.HTTPStatusError as e:
             raise self._handle_api_error(e)
         except Exception as e:
-            raise APIError(f"Query failed: {str(e)}")
+            raise APIError(f"Query failed: {str(e)}")
+
+    async def web_search(
+        self,
+        query: str,
+        num_results: int = 5,
+        category: Optional[str] = None,
+        days_back: Optional[int] = None,
+        find_similar_to: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Perform AI-powered web search."""
+        try:
+            payload = {
+                "query": query,
+                "num_results": min(num_results, 10),
+            }
+
+            # Add optional parameters
+            if category:
+                payload["category"] = category
+            if days_back:
+                payload["days_back"] = days_back
+            if find_similar_to:
+                payload["find_similar_to"] = find_similar_to
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/web-search",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Web search failed: {str(e)}")
+
+    async def deep_research(
+        self,
+        query: str,
+        output_format: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Perform deep research using AI agent."""
+        try:
+            payload = {
+                "query": query,
+            }
+
+            if output_format:
+                payload["output_format"] = output_format
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/deep-research",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Deep research failed: {str(e)}")
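As a quick illustration, a hypothetical call site for the two methods added above. The method signatures and the response keys (`total_results`, `data`) come from the diff; the constructor arguments, import path, and placeholder values are assumptions.

```python
# Hypothetical usage of the new NIAApiClient.web_search / deep_research methods.
import asyncio

from nia_mcp_server.api_client import NIAApiClient  # assumed import path

async def demo() -> None:
    # Constructor arguments are placeholders, not the real configuration.
    client = NIAApiClient(api_key="nia-...", base_url="https://api.example.invalid")
    try:
        search = await client.web_search(
            query="fast vector database in Rust",
            num_results=5,
            category="github",  # forwarded as payload["category"]
            days_back=30,       # forwarded as payload["days_back"]
        )
        print("web search sources:", search.get("total_results", 0))

        research = await client.deep_research(
            query="Compare HNSW and IVF index trade-offs",
            output_format="report",  # forwarded as payload["output_format"]
        )
        print("research fields:", list(research.get("data", {})))
    finally:
        await client.close()

asyncio.run(demo())
```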
--- nia_mcp_server-1.0.1/src/nia_mcp_server/server.py
+++ nia_mcp_server-1.0.3/src/nia_mcp_server/server.py
@@ -755,215 +755,132 @@ async def nia_web_search(
         Search results with actionable next steps
     """
     try:
-
-        api_key = os.getenv("EXA_API_KEY")
-        if not api_key:
-            return [TextContent(
-                type="text",
-                text="❌ NIA Web Search unavailable.\n\n"
-                     "This feature requires additional configuration. "
-                     "Please contact support for access to advanced search features."
-            )]
-
-        # Import client
-        try:
-            from exa_py import Exa
-        except ImportError:
-            return [TextContent(
-                type="text",
-                text="❌ NIA Web Search unavailable. Please update the NIA MCP server."
-            )]
-
-        # Initialize client
-        client = Exa(api_key)
-
-        # Limit results to reasonable number
-        num_results = min(num_results, 10)
+        client = await ensure_api_client()
 
         logger.info(f"Searching content for query: {query}")
 
-        #
-
-
-
-
-
-
-
-        # Add category filter if specified
-        if category:
-            # Map user-friendly categories to Exa categories
-            category_map = {
-                "github": "github",
-                "company": "company",
-                "research": "research paper",
-                "news": "news",
-                "tweet": "tweet",
-                "pdf": "pdf",
-                "blog": "personal site"
-            }
-            if category.lower() in category_map:
-                search_params["category"] = category_map[category.lower()]
-
-        # Add date filter for trending content
-        if days_back:
-            from datetime import datetime, timedelta
-            start_date = (datetime.now() - timedelta(days=days_back)).strftime("%Y-%m-%d")
-            search_params["start_published_date"] = start_date
-
-        # Use similarity search if URL provided
-        if find_similar_to:
-            results = client.find_similar_and_contents(
-                find_similar_to,
-                **search_params
-            )
-        else:
-            results = client.search_and_contents(
-                query,
-                **search_params
-            )
-
-        # Separate results by type
-        github_repos = []
-        documentation = []
-        other_content = []
+        # Use the API client method instead of direct HTTP call
+        result = await client.web_search(
+            query=query,
+            num_results=num_results,
+            category=category,
+            days_back=days_back,
+            find_similar_to=find_similar_to
+        )
 
-
-
-
-
-            if "github.com" in url and "/tree/" not in url and "/blob/" not in url:
-                # It's a GitHub repo (not a specific file)
-                # Extract owner/repo from URL
-                try:
-                    parsed = urlparse(url)
-                    # Ensure we have a valid GitHub URL
-                    if parsed.hostname == "github.com":
-                        # Remove leading/trailing slashes and split the path
-                        path_parts = parsed.path.strip("/").split("/")
-                        # Verify we have at least owner and repo in the path
-                        if len(path_parts) >= 2 and path_parts[0] and path_parts[1]:
-                            # Extract only the owner and repo, ignoring any additional path components
-                            owner_repo = f"{path_parts[0]}/{path_parts[1]}"
-                            github_repos.append({
-                                "url": url,
-                                "owner_repo": owner_repo,
-                                "title": result.title or owner_repo,
-                                "summary": result.text[:200] if result.text else "",
-                                "highlights": result.highlights[:2] if result.highlights else [],
-                                "published_date": getattr(result, 'published_date', None)
-                            })
-                except:
-                    pass
-            elif any(doc_pattern in url for doc_pattern in ["docs.", "documentation", ".readthedocs.", "/docs/"]):
-                # It's documentation
-                documentation.append({
-                    "url": url,
-                    "title": result.title or "Documentation",
-                    "summary": result.text[:200] if result.text else "",
-                    "highlights": result.highlights[:2] if result.highlights else []
-                })
-            else:
-                # Other content
-                other_content.append({
-                    "url": url,
-                    "title": result.title or "Content",
-                    "summary": result.text[:200] if result.text else ""
-                })
+        # Extract results
+        github_repos = result.get("github_repos", [])
+        documentation = result.get("documentation", [])
+        other_content = result.get("other_content", [])
 
         # Format response to naturally guide next actions
-
+        response_text = f"## 🔍 NIA Web Search Results for: \"{query}\"\n\n"
 
         if days_back:
-
+            response_text += f"*Showing results from the last {days_back} days*\n\n"
 
         if find_similar_to:
-
+            response_text += f"*Finding content similar to: {find_similar_to}*\n\n"
 
         # GitHub Repositories Section
         if github_repos:
-
+            response_text += f"### 📦 GitHub Repositories ({len(github_repos)} found)\n\n"
 
             for i, repo in enumerate(github_repos[:num_results], 1):
-
-
+                response_text += f"**{i}. {repo['title']}**\n"
+                response_text += f" 📍 `{repo['url']}`\n"
                 if repo.get('published_date'):
-
+                    response_text += f" 📅 Updated: {repo['published_date']}\n"
                 if repo['summary']:
-
+                    response_text += f" 📝 {repo['summary']}...\n"
                 if repo['highlights']:
-
-
+                    response_text += f" ✨ Key features: {', '.join(repo['highlights'])}\n"
+                response_text += "\n"
 
             # Be more aggressive based on query specificity
             if len(github_repos) == 1 or any(specific_word in query.lower() for specific_word in ["specific", "exact", "particular", "find me", "looking for"]):
-
-
-
+                response_text += "**🚀 RECOMMENDED ACTION - Index this repository with NIA:**\n"
+                response_text += f"```\nIndex {github_repos[0]['owner_repo']}\n```\n"
+                response_text += "✨ This will enable AI-powered code search, understanding, and analysis!\n\n"
             else:
-
-
-
-
+                response_text += "**🚀 Make these repositories searchable with NIA's AI:**\n"
+                response_text += f"- **Quick start:** Say \"Index {github_repos[0]['owner_repo']}\"\n"
+                response_text += "- **Index multiple:** Say \"Index all repositories\"\n"
+                response_text += "- **Benefits:** AI-powered code search, architecture understanding, implementation details\n\n"
 
         # Documentation Section
         if documentation:
-
+            response_text += f"### 📚 Documentation ({len(documentation)} found)\n\n"
 
             for i, doc in enumerate(documentation[:num_results], 1):
-
-
+                response_text += f"**{i}. {doc['title']}**\n"
+                response_text += f" 📍 `{doc['url']}`\n"
                 if doc['summary']:
-
+                    response_text += f" 📝 {doc['summary']}...\n"
                 if doc.get('highlights'):
-
-
+                    response_text += f" ✨ Key topics: {', '.join(doc['highlights'])}\n"
+                response_text += "\n"
 
             # Be more aggressive for documentation too
             if len(documentation) == 1 or any(specific_word in query.lower() for specific_word in ["docs", "documentation", "guide", "tutorial", "reference"]):
-
-
-
+                response_text += "**📖 RECOMMENDED ACTION - Index this documentation with NIA:**\n"
+                response_text += f"```\nIndex documentation {documentation[0]['url']}\n```\n"
+                response_text += "✨ NIA will make this fully searchable with AI-powered Q&A!\n\n"
             else:
-
-
-
-
+                response_text += "**📖 Make this documentation AI-searchable with NIA:**\n"
+                response_text += f"- **Quick start:** Say \"Index documentation {documentation[0]['url']}\"\n"
+                response_text += "- **Index all:** Say \"Index all documentation\"\n"
+                response_text += "- **Benefits:** Instant answers, smart search, code examples extraction\n\n"
 
         # Other Content Section
         if other_content and not github_repos and not documentation:
-
+            response_text += f"### 🌐 Other Content ({len(other_content)} found)\n\n"
 
             for i, content in enumerate(other_content[:num_results], 1):
-
-
+                response_text += f"**{i}. {content['title']}**\n"
+                response_text += f" 📍 `{content['url']}`\n"
                 if content['summary']:
-
-
+                    response_text += f" 📝 {content['summary']}...\n"
+                response_text += "\n"
 
         # No results found
         if not github_repos and not documentation and not other_content:
-
-
-
-
+            response_text = f"No results found for '{query}'. Try:\n"
+            response_text += "- Using different keywords\n"
+            response_text += "- Being more specific (e.g., 'Python RAG implementation')\n"
+            response_text += "- Including technology names (e.g., 'LangChain', 'TypeScript')\n"
 
         # Add prominent call-to-action if we found indexable content
         if github_repos or documentation:
-
-
-
-
-
-
-
+            response_text += "\n## 🎯 **Ready to unlock NIA's AI capabilities?**\n"
+            response_text += "The repositories and documentation above can be indexed for:\n"
+            response_text += "- 🤖 AI-powered code understanding and search\n"
+            response_text += "- 💡 Instant answers to technical questions\n"
+            response_text += "- 🔍 Deep architectural insights\n"
+            response_text += "- 📚 Smart documentation Q&A\n\n"
+            response_text += "**Just copy and paste the index commands above!**\n"
 
         # Add search metadata
-
-
+        response_text += f"\n---\n"
+        response_text += f"*Searched {result.get('total_results', 0)} sources using NIA Web Search*"
 
-        return [TextContent(type="text", text=
+        return [TextContent(type="text", text=response_text)]
 
+    except APIError as e:
+        logger.error(f"API Error in web search: {e}")
+        if e.status_code == 403 or "free tier limit" in str(e).lower() or "free api requests" in str(e).lower():
+            if e.detail and "25 free API requests" in e.detail:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited API access."
+                )]
+            else:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
+                )]
+        else:
+            return [TextContent(type="text", text=f"❌ {str(e)}")]
     except Exception as e:
         logger.error(f"Error in NIA web search: {str(e)}")
         return [TextContent(
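The new `except APIError` branch above keys off `e.status_code` and `e.detail`. Below is a minimal sketch of the exception interface that handler assumes; the real class in api_client.py may carry more fields, and the sample message is invented only to show the free-tier check.

```python
# Sketch of the APIError interface the handler relies on (status_code, detail, str(e)).
# Field names come from the handler code above; the class body and sample values are assumptions.
from typing import Optional

class APIError(Exception):
    def __init__(self, message: str, status_code: Optional[int] = None,
                 detail: Optional[str] = None) -> None:
        super().__init__(message)
        self.status_code = status_code
        self.detail = detail

# The free-tier branch then evaluates roughly like this:
err = APIError("free tier limit reached", status_code=403,
               detail="You have used all 25 free API requests.")  # hypothetical message
hit_free_tier = (
    err.status_code == 403
    or "free tier limit" in str(err).lower()
    or "free api requests" in str(err).lower()
)
print(hit_free_tier)  # True
```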
@@ -1011,154 +928,138 @@ async def nia_deep_research_agent(
         Comprehensive research results with citations
     """
     try:
-
-        api_key = os.getenv("EXA_API_KEY")
-        if not api_key:
-            return [TextContent(
-                type="text",
-                text="❌ Deep research unavailable. This advanced feature requires additional configuration."
-            )]
+        client = await ensure_api_client()
 
-
+        logger.info(f"Starting deep research for: {query}")
+
+        # Use the API client method with proper timeout handling
         try:
-
-
-
+            result = await asyncio.wait_for(
+                client.deep_research(query=query, output_format=output_format),
+                timeout=720.0 # 12 minutes to allow for longer research tasks
+            )
+        except asyncio.TimeoutError:
+            logger.error(f"Deep research timed out after 12 minutes for query: {query}")
             return [TextContent(
                 type="text",
-                text="❌ Research
+                text="❌ Research timed out. The query may be too complex. Try:\n"
+                     "- Breaking it into smaller questions\n"
+                     "- Using more specific keywords\n"
+                     "- Trying the nia_web_search tool for simpler queries"
             )]
 
-        #
-
-
-        logger.info(f"Starting deep research for: {query}")
+        # Format the research results
+        response_text = f"## 🔬 NIA Deep Research Agent Results\n\n"
+        response_text += f"**Query:** {query}\n\n"
 
-
-
-        # Let the AI infer the schema based on the query
-        task = client.research.create_task(
-            instructions=query,
-            infer_schema=True,
-            model="exa-research-pro" # Use the pro model
-        )
+        if result.get("data"):
+            response_text += "### 📊 Research Findings:\n\n"
 
-
+            # Pretty print the JSON data
+            import json
+            formatted_data = json.dumps(result["data"], indent=2)
+            response_text += f"```json\n{formatted_data}\n```\n\n"
 
-        #
-        result
-
-
-
-
+            # Add citations if available
+            if result.get("citations"):
+                response_text += "### 📚 Sources & Citations:\n\n"
+                citation_num = 1
+                for field, citations in result["citations"].items():
+                    if citations:
+                        response_text += f"**{field}:**\n"
+                        for citation in citations[:3]: # Limit to 3 citations per field
+                            response_text += f"{citation_num}. [{citation.get('title', 'Source')}]({citation.get('url', '#')})\n"
+                            if citation.get('snippet'):
+                                response_text += f" > {citation['snippet'][:150]}...\n"
+                            citation_num += 1
+                        response_text += "\n"
 
-
-        return [TextContent(
-            type="text",
-            text=f"❌ Research failed. Please try rephrasing your question."
-        )]
+            response_text += "### 💡 RECOMMENDED NEXT ACTIONS WITH NIA:\n\n"
 
-        #
-
-
+            # Extract potential repos and docs from the research data
+            repos_found = []
+            docs_found = []
 
-
-
-
-
-            formatted_data = json.dumps(result.data, indent=2)
-            response += f"```json\n{formatted_data}\n```\n\n"
+            # Helper function to extract URLs from nested data structures
+            def extract_urls_from_data(data, urls_list=None):
+                if urls_list is None:
+                    urls_list = []
 
-
-
-
-
-                for
-
-
-
-
-
-                response += f" > {citation['snippet'][:150]}...\n"
-                citation_num += 1
-                response += "\n"
+                if isinstance(data, dict):
+                    for value in data.values():
+                        extract_urls_from_data(value, urls_list)
+                elif isinstance(data, list):
+                    for item in data:
+                        extract_urls_from_data(item, urls_list)
+                elif isinstance(data, str):
+                    # Check if this string is a URL
+                    if data.startswith(('http://', 'https://')):
+                        urls_list.append(data)
 
-
-
-
-
-            docs_found = []
-
-            # Helper function to extract URLs from nested data structures
-            def extract_urls_from_data(data, urls_list=None):
-                if urls_list is None:
-                    urls_list = []
-
-                if isinstance(data, dict):
-                    for value in data.values():
-                        extract_urls_from_data(value, urls_list)
-                elif isinstance(data, list):
-                    for item in data:
-                        extract_urls_from_data(item, urls_list)
-                elif isinstance(data, str):
-                    # Check if this string is a URL
-                    if data.startswith(('http://', 'https://')):
-                        urls_list.append(data)
-
-                return urls_list
-
-            # Extract all URLs from the data
-            all_urls = extract_urls_from_data(result.data)
-
-            # Filter for GitHub repos and documentation
-            import re
-            github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'
-
-            for url in all_urls:
-                # Check for GitHub repos
-                github_match = re.search(github_pattern, url)
-                if github_match and '/tree/' not in url and '/blob/' not in url:
-                    repos_found.append(github_match.group(1))
-                # Check for documentation URLs
-                elif any(doc_indicator in url.lower() for doc_indicator in ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']):
-                    docs_found.append(url)
-
-            # Remove duplicates and limit results
-            repos_found = list(set(repos_found))[:3]
-            docs_found = list(set(docs_found))[:3]
-
-            if repos_found:
-                response += "**🚀 DISCOVERED REPOSITORIES - Index with NIA for deep analysis:**\n"
-                for repo in repos_found:
-                    response += f"```\nIndex {repo}\n```\n"
-                response += "✨ Enable AI-powered code search and architecture understanding!\n\n"
-
-            if docs_found:
-                response += "**📖 DISCOVERED DOCUMENTATION - Index with NIA for smart search:**\n"
-                for doc in docs_found[:2]: # Limit to 2 for readability
-                    response += f"```\nIndex documentation {doc}\n```\n"
-                response += "✨ Make documentation instantly searchable with AI Q&A!\n\n"
-
-            if not repos_found and not docs_found:
-                response += "**🔍 Manual indexing options:**\n"
-                response += "- If you see any GitHub repos mentioned: Say \"Index [owner/repo]\"\n"
-                response += "- If you see any documentation sites: Say \"Index documentation [url]\"\n"
-                response += "- These will unlock NIA's powerful AI search capabilities!\n\n"
-
-            response += "**📊 Other actions:**\n"
-            response += "- Ask follow-up questions about the research\n"
-            response += "- Request a different analysis format\n"
-            response += "- Search for more specific information\n"
-        else:
-            response += "No structured data returned. The research may need a more specific query."
+                return urls_list
+
+            # Extract all URLs from the data
+            all_urls = extract_urls_from_data(result["data"])
 
-
+            # Filter for GitHub repos and documentation
+            import re
+            github_pattern = r'github\.com/([a-zA-Z0-9-]+/[a-zA-Z0-9-_.]+)'
 
-
-
-
-
+            for url in all_urls:
+                # Check for GitHub repos
+                github_match = re.search(github_pattern, url)
+                if github_match and '/tree/' not in url and '/blob/' not in url:
+                    repos_found.append(github_match.group(1))
+                # Check for documentation URLs
+                elif any(doc_indicator in url.lower() for doc_indicator in ['docs', 'documentation', '.readthedocs.', '/guide', '/tutorial']):
+                    docs_found.append(url)
 
+            # Remove duplicates and limit results
+            repos_found = list(set(repos_found))[:3]
+            docs_found = list(set(docs_found))[:3]
+
+            if repos_found:
+                response_text += "**🚀 DISCOVERED REPOSITORIES - Index with NIA for deep analysis:**\n"
+                for repo in repos_found:
+                    response_text += f"```\nIndex {repo}\n```\n"
+                response_text += "✨ Enable AI-powered code search and architecture understanding!\n\n"
+
+            if docs_found:
+                response_text += "**📖 DISCOVERED DOCUMENTATION - Index with NIA for smart search:**\n"
+                for doc in docs_found[:2]: # Limit to 2 for readability
+                    response_text += f"```\nIndex documentation {doc}\n```\n"
+                response_text += "✨ Make documentation instantly searchable with AI Q&A!\n\n"
+
+            if not repos_found and not docs_found:
+                response_text += "**🔍 Manual indexing options:**\n"
+                response_text += "- If you see any GitHub repos mentioned: Say \"Index [owner/repo]\"\n"
+                response_text += "- If you see any documentation sites: Say \"Index documentation [url]\"\n"
+                response_text += "- These will unlock NIA's powerful AI search capabilities!\n\n"
+
+            response_text += "**📊 Other actions:**\n"
+            response_text += "- Ask follow-up questions about the research\n"
+            response_text += "- Request a different analysis format\n"
+            response_text += "- Search for more specific information\n"
+        else:
+            response_text += "No structured data returned. The research may need a more specific query."
+
+        return [TextContent(type="text", text=response_text)]
+
+    except APIError as e:
+        logger.error(f"API Error in deep research: {e}")
+        if e.status_code == 403 or "free tier limit" in str(e).lower() or "free api requests" in str(e).lower():
+            if e.detail and "25 free API requests" in e.detail:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {e.detail}\n\n💡 Tip: Upgrade to Pro at https://trynia.ai/billing for unlimited API access."
+                )]
+            else:
+                return [TextContent(
+                    type="text",
+                    text=f"❌ {str(e)}\n\n💡 Tip: You've reached the free tier limit. Upgrade to Pro for unlimited access."
+                )]
+        else:
+            return [TextContent(type="text", text=f"❌ {str(e)}")]
     except Exception as e:
         logger.error(f"Error in deep research: {str(e)}")
         return [TextContent(
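To make the recursive URL harvesting in this hunk concrete, here is the `extract_urls_from_data` helper from the diff exercised on a small, made-up research payload; the sample dictionary is purely hypothetical.

```python
# extract_urls_from_data as added in server.py, applied to hypothetical data.
def extract_urls_from_data(data, urls_list=None):
    if urls_list is None:
        urls_list = []
    if isinstance(data, dict):
        # Recurse into every value of a dict
        for value in data.values():
            extract_urls_from_data(value, urls_list)
    elif isinstance(data, list):
        # Recurse into every list element
        for item in data:
            extract_urls_from_data(item, urls_list)
    elif isinstance(data, str):
        # Collect plain strings that look like URLs
        if data.startswith(('http://', 'https://')):
            urls_list.append(data)
    return urls_list

sample = {  # made-up structure standing in for result["data"]
    "frameworks": [{"name": "example", "repo": "https://github.com/example/project"}],
    "docs": "https://docs.example.invalid/guide",
    "note": "not a URL",
}
print(extract_urls_from_data(sample))
# ['https://github.com/example/project', 'https://docs.example.invalid/guide']
```

The GitHub URLs collected this way are then matched against `github_pattern` and the documentation indicators shown in the diff to build the "Index ..." suggestions.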