nia-mcp-server 1.0.25-py3-none-any.whl → 1.0.42-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nia-mcp-server might be problematic.
- nia_mcp_server/__init__.py +1 -1
- nia_mcp_server/api_client.py +503 -99
- nia_mcp_server/cli.py +31 -4
- nia_mcp_server/profiles.py +20 -4
- nia_mcp_server/project_init.py +1 -1
- nia_mcp_server/rule_transformer.py +2 -2
- nia_mcp_server/server.py +2577 -2167
- nia_mcp_server/setup.py +593 -79
- {nia_mcp_server-1.0.25.dist-info → nia_mcp_server-1.0.42.dist-info}/METADATA +7 -4
- nia_mcp_server-1.0.42.dist-info/RECORD +19 -0
- {nia_mcp_server-1.0.25.dist-info → nia_mcp_server-1.0.42.dist-info}/WHEEL +1 -1
- nia_mcp_server-1.0.25.dist-info/RECORD +0 -19
- {nia_mcp_server-1.0.25.dist-info → nia_mcp_server-1.0.42.dist-info}/entry_points.txt +0 -0
- {nia_mcp_server-1.0.25.dist-info → nia_mcp_server-1.0.42.dist-info}/licenses/LICENSE +0 -0
nia_mcp_server/api_client.py
CHANGED
@@ -28,7 +28,7 @@ class NIAApiClient:
         self.client = httpx.AsyncClient(
             headers={
                 "Authorization": f"Bearer {api_key}",
-                "User-Agent": "nia-mcp-server/1.0.
+                "User-Agent": "nia-mcp-server/1.0.27",
                 "Content-Type": "application/json"
             },
             timeout=720.0  # 12 minute timeout for deep research operations
@@ -46,18 +46,36 @@ class NIAApiClient:
 
     def _handle_api_error(self, e: httpx.HTTPStatusError) -> APIError:
         """Convert HTTP errors to more specific API errors."""
-        error_detail =
+        error_detail = ""
+        response_text = None
+
+        # Safely access response body; streaming responses may not be readable yet.
         try:
-
-            error_detail =
-        except
-
-
+            response_text = e.response.text
+            error_detail = response_text
+        except RuntimeError:
+            logger.warning(
+                "Unable to read streaming response body while handling HTTP error."
+            )
+            # Fall back to reason phrase if available.
+            error_detail = e.response.reason_phrase or ""
+
+        if response_text:
+            try:
+                error_json = e.response.json()
+                error_detail = error_json.get("detail", error_detail)
+                logger.debug(f"Parsed error JSON: {error_json}")
+            except (json.JSONDecodeError, ValueError) as parse_error:
+                # Failed to parse JSON response, keep original error_detail
+                logger.warning(f"Failed to parse error response as JSON: {parse_error}")
+
         status_code = e.response.status_code
-
+
+        if not error_detail:
+            error_detail = f"HTTP {status_code} Error"
+
         # Log the full error for debugging
-        logger.error(f"API error - Status: {status_code},
+        logger.error(f"API error - Status: {status_code}, Detail: {error_detail}")
 
         # Handle specific error cases
         if status_code == 401:
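For context on the new `except RuntimeError` branch: httpx raises `httpx.ResponseNotRead`, a `RuntimeError` subclass, when the body of a streamed response is accessed before it has been read, which is the case this handler now tolerates. A minimal sketch of that failure mode (the URL is a placeholder):

import asyncio

import httpx

async def main() -> None:
    async with httpx.AsyncClient() as client:
        # With stream(), the body is not loaded until explicitly read.
        async with client.stream("GET", "https://example.com/") as response:
            try:
                _ = response.text  # raises httpx.ResponseNotRead (a RuntimeError)
            except RuntimeError as exc:
                print(f"Body unavailable: {exc!r}")
                # Same fallback the handler above uses.
                print(f"Reason phrase: {response.reason_phrase}")

asyncio.run(main())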
@@ -87,6 +105,9 @@ class NIAApiClient:
             )
         elif status_code == 429:
             return APIError(f"Rate limit exceeded: {error_detail}", status_code, error_detail)
+        elif status_code == 400:
+            # Bad Request - return the full error detail from backend
+            return APIError(error_detail, status_code, error_detail)
         elif status_code == 404:
             return APIError(f"Resource not found: {error_detail}", status_code, error_detail)
         elif status_code == 500:
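The new 400 branch passes `error_detail` through as the message itself rather than wrapping it. For reference, a hypothetical `APIError` shape consistent with the three-argument calls in these hunks; the real class is defined elsewhere in the package and may differ:

from typing import Optional

class APIError(Exception):
    """Hypothetical sketch: positional (message, status_code, detail)."""

    def __init__(
        self,
        message: str,
        status_code: Optional[int] = None,
        detail: Optional[str] = None,
    ) -> None:
        super().__init__(message)
        self.status_code = status_code
        self.detail = detail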
@@ -432,7 +453,104 @@ class NIAApiClient:
         except Exception as e:
             logger.error(f"Failed to rename repository: {e}")
             raise APIError(f"Failed to rename repository: {str(e)}")
-
+
+    async def get_github_tree(
+        self,
+        owner_repo: str,
+        branch: Optional[str] = None,
+        include_paths: Optional[List[str]] = None,
+        exclude_paths: Optional[List[str]] = None,
+        file_extensions: Optional[List[str]] = None,
+        exclude_extensions: Optional[List[str]] = None,
+        show_full_paths: bool = False
+    ) -> Dict[str, Any]:
+        """Get file tree directly from GitHub API (no FalkorDB dependency).
+
+        Args:
+            owner_repo: Repository in owner/repo format or repository ID
+            branch: Optional branch name (defaults to repository's default branch)
+            include_paths: Only include files in these paths (e.g., ["src/", "lib/"])
+            exclude_paths: Exclude files in these paths (e.g., ["node_modules/", "dist/"])
+            file_extensions: Only include these file extensions (e.g., [".py", ".js"])
+            exclude_extensions: Exclude these file extensions (e.g., [".md", ".lock"])
+            show_full_paths: Show full file paths instead of hierarchical tree
+
+        Returns:
+            GitHub tree structure with files, directories, and stats
+        """
+        try:
+            # Check if this looks like owner/repo format (contains /)
+            if '/' in owner_repo:
+                # First, get the repository ID
+                status = await self.get_repository_status(owner_repo)
+                if not status:
+                    raise APIError(f"Repository {owner_repo} not found", 404)
+
+                # Extract the repository ID from status
+                repo_id = status.get("repository_id") or status.get("id")
+                if not repo_id:
+                    # Try to get it from list as fallback
+                    repos = await self.list_repositories()
+                    for repo in repos:
+                        if repo.get("repository") == owner_repo:
+                            repo_id = repo.get("repository_id") or repo.get("id")
+                            break
+
+                if not repo_id:
+                    raise APIError(f"No repository ID found for {owner_repo}", 404)
+
+                # Get tree using the ID
+                params = {}
+                if branch:
+                    params["branch"] = branch
+                if include_paths:
+                    params["include_paths"] = ",".join(include_paths)
+                if exclude_paths:
+                    params["exclude_paths"] = ",".join(exclude_paths)
+                if file_extensions:
+                    params["file_extensions"] = ",".join(file_extensions)
+                if exclude_extensions:
+                    params["exclude_extensions"] = ",".join(exclude_extensions)
+                if show_full_paths:
+                    params["show_full_paths"] = "true"
+
+                response = await self.client.get(
+                    f"{self.base_url}/v2/repositories/{repo_id}/github-tree",
+                    params=params
+                )
+                response.raise_for_status()
+                return response.json()
+            else:
+                # Assume it's already a repository ID
+                params = {}
+                if branch:
+                    params["branch"] = branch
+                if include_paths:
+                    params["include_paths"] = ",".join(include_paths)
+                if exclude_paths:
+                    params["exclude_paths"] = ",".join(exclude_paths)
+                if file_extensions:
+                    params["file_extensions"] = ",".join(file_extensions)
+                if exclude_extensions:
+                    params["exclude_extensions"] = ",".join(exclude_extensions)
+                if show_full_paths:
+                    params["show_full_paths"] = "true"
+
+                response = await self.client.get(
+                    f"{self.base_url}/v2/repositories/{owner_repo}/github-tree",
+                    params=params
+                )
+                response.raise_for_status()
+                return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except APIError:
+            raise
+        except Exception as e:
+            logger.error(f"Failed to get GitHub tree: {e}")
+            raise APIError(f"Failed to get GitHub tree: {str(e)}")
+
     # Data Source methods
 
     async def create_data_source(
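A usage sketch for `get_github_tree`, assuming an API-key constructor as suggested by the `Authorization` header setup earlier; the repository name is illustrative:

import asyncio

from nia_mcp_server.api_client import NIAApiClient

async def main() -> None:
    client = NIAApiClient(api_key="nia-...")  # constructor shape assumed
    tree = await client.get_github_tree(
        "octocat/hello-world",                # owner/repo form triggers the ID lookup
        include_paths=["src/"],
        exclude_extensions=[".md", ".lock"],
        show_full_paths=True,
    )
    print(tree.get("stats"))

asyncio.run(main())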
@@ -449,15 +567,16 @@ class NIAApiClient:
     ) -> Dict[str, Any]:
         """Create a new documentation/web data source."""
         try:
+            effective_max_age = 3600 if max_age is None else max_age
+
             payload = {
                 "url": url,
                 "url_patterns": url_patterns or [],
-                "exclude_patterns": exclude_patterns or []
+                "exclude_patterns": exclude_patterns or [],
+                "max_age": effective_max_age
             }
 
             # Add optional parameters
-            if max_age is not None:
-                payload["max_age"] = max_age
             # Don't hardcode formats - let backend defaults apply
             # This allows screenshots to be captured by default
             if only_main_content is not None:
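The change promotes `max_age` from an optional field to one that is always sent, defaulting to one hour while still honoring an explicit `0` (force a fresh crawl). The ternary preserves that distinction:

from typing import Optional

def effective_max_age(max_age: Optional[int]) -> int:
    # None means "use the one-hour default"; an explicit 0 stays 0.
    return 3600 if max_age is None else max_age

assert effective_max_age(None) == 3600
assert effective_max_age(0) == 0
assert effective_max_age(86400) == 86400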
@@ -535,6 +654,213 @@ class NIAApiClient:
             logger.error(f"Failed to rename data source: {e}")
             raise APIError(f"Failed to rename data source: {str(e)}")
 
+    # Documentation Virtual Filesystem Methods
+
+    async def get_doc_tree(self, source_id: str) -> Dict[str, Any]:
+        """Get filesystem tree structure of indexed documentation."""
+        try:
+            response = await self.client.get(f"{self.base_url}/v2/data-sources/{source_id}/tree")
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            logger.error(f"Failed to get documentation tree: {e}")
+            raise APIError(f"Failed to get documentation tree: {str(e)}")
+
+    async def get_doc_ls(self, source_id: str, path: str = "/") -> Dict[str, Any]:
+        """List contents of a virtual directory in the documentation."""
+        try:
+            response = await self.client.get(
+                f"{self.base_url}/v2/data-sources/{source_id}/ls",
+                params={"path": path}
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            logger.error(f"Failed to list documentation directory: {e}")
+            raise APIError(f"Failed to list documentation directory: {str(e)}")
+
+    async def get_doc_read(
+        self,
+        source_id: str,
+        path: str,
+        line_start: Optional[int] = None,
+        line_end: Optional[int] = None,
+        max_length: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """Read content of a documentation page by virtual filesystem path.
+
+        Args:
+            source_id: Data source ID
+            path: Virtual path to the page
+            line_start: Start line (1-based, inclusive)
+            line_end: End line (1-based, inclusive)
+            max_length: Max characters to return
+        """
+        try:
+            params = {"path": path}
+            if line_start is not None:
+                params["line_start"] = line_start
+            if line_end is not None:
+                params["line_end"] = line_end
+            if max_length is not None:
+                params["max_length"] = max_length
+
+            response = await self.client.get(
+                f"{self.base_url}/v2/data-sources/{source_id}/read",
+                params=params
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            logger.error(f"Failed to read documentation file: {e}")
+            raise APIError(f"Failed to read documentation file: {str(e)}")
+
+    async def post_doc_grep(
+        self,
+        source_id: str,
+        pattern: str,
+        path: str = "/",
+        context_lines: Optional[int] = None,
+        A: Optional[int] = None,
+        B: Optional[int] = None,
+        case_sensitive: bool = False,
+        whole_word: bool = False,
+        fixed_string: bool = False,
+        max_matches_per_file: int = 10,
+        max_total_matches: int = 100,
+        output_mode: str = "content",
+        highlight: bool = False
+    ) -> Dict[str, Any]:
+        """Search documentation content with regex pattern.
+
+        Args:
+            source_id: Data source ID
+            pattern: Regex pattern to search for
+            path: Limit search to this path prefix
+            context_lines: Lines before AND after (shorthand for A/B)
+            A: Lines after each match (like grep -A)
+            B: Lines before each match (like grep -B)
+            case_sensitive: Case-sensitive matching
+            whole_word: Match whole words only
+            fixed_string: Treat pattern as literal string
+            max_matches_per_file: Max matches per file
+            max_total_matches: Max total matches
+            output_mode: Output format ('content', 'files_with_matches', 'count')
+            highlight: Add >>markers<< around matched text
+        """
+        try:
+            body = {
+                "pattern": pattern,
+                "path": path,
+                "case_sensitive": case_sensitive,
+                "whole_word": whole_word,
+                "fixed_string": fixed_string,
+                "max_matches_per_file": max_matches_per_file,
+                "max_total_matches": max_total_matches,
+                "output_mode": output_mode,
+                "highlight": highlight
+            }
+
+            # Only include optional context parameters if provided
+            if context_lines is not None:
+                body["context_lines"] = context_lines
+            if A is not None:
+                body["A"] = A
+            if B is not None:
+                body["B"] = B
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/data-sources/{source_id}/grep",
+                json=body
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            logger.error(f"Failed to grep documentation: {e}")
+            raise APIError(f"Failed to grep documentation: {str(e)}")
+
+    async def post_code_grep(
+        self,
+        repository: str,
+        pattern: str,
+        path: str = "",
+        context_lines: Optional[int] = None,
+        A: Optional[int] = None,
+        B: Optional[int] = None,
+        case_sensitive: bool = False,
+        whole_word: bool = False,
+        fixed_string: bool = False,
+        max_matches_per_file: int = 10,
+        max_total_matches: int = 100,
+        output_mode: str = "content",
+        highlight: bool = False,
+        exhaustive: bool = False
+    ) -> Dict[str, Any]:
+        """Search repository code with regex pattern.
+
+        Args:
+            repository: Repository identifier (owner/repo format)
+            pattern: Regex pattern to search for
+            path: Limit search to this file path prefix
+            context_lines: Lines before AND after (shorthand for A/B)
+            A: Lines after each match (like grep -A)
+            B: Lines before each match (like grep -B)
+            case_sensitive: Case-sensitive matching
+            whole_word: Match whole words only
+            fixed_string: Treat pattern as literal string
+            max_matches_per_file: Max matches per file
+            max_total_matches: Max total matches
+            output_mode: Output format ('content', 'files_with_matches', 'count')
+            highlight: Add >>markers<< around matched text
+            exhaustive: When True, searches ALL chunks instead of BM25 top-k
+        """
+        try:
+            body = {
+                "pattern": pattern,
+                "path": path,
+                "case_sensitive": case_sensitive,
+                "whole_word": whole_word,
+                "fixed_string": fixed_string,
+                "max_matches_per_file": max_matches_per_file,
+                "max_total_matches": max_total_matches,
+                "output_mode": output_mode,
+                "highlight": highlight,
+                "exhaustive": exhaustive
+            }
+
+            # Only include optional context parameters if provided
+            if context_lines is not None:
+                body["context_lines"] = context_lines
+            if A is not None:
+                body["A"] = A
+            if B is not None:
+                body["B"] = B
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/repositories/{quote(repository, safe='')}/grep",
+                json=body
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            logger.error(f"Failed to grep repository code: {e}")
+            raise APIError(f"Failed to grep repository code: {str(e)}")
+
     async def query_unified(
         self,
         messages: List[Dict[str, str]],
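Both grep methods mirror grep's flags (`-A`/`-B` context, fixed strings, whole words). A call sketch for the repository variant; the client construction and repository name are illustrative:

import asyncio

from nia_mcp_server.api_client import NIAApiClient

async def main() -> None:
    client = NIAApiClient(api_key="nia-...")  # constructor shape assumed
    result = await client.post_code_grep(
        repository="octocat/hello-world",
        pattern=r"def \w+_handler",
        path="src/",
        A=2,                   # two lines after each match, like grep -A 2
        B=1,                   # one line before, like grep -B 1
        highlight=True,        # wraps matches in >>markers<<
    )
    print(result)

asyncio.run(main())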
@@ -569,11 +895,10 @@ class NIAApiClient:
             else:
                 # Convert other types to string
                 source_list.append(str(source))
-
-        #
-        if
-
-
+
+        # NOTE: Don't validate here - let backend handle auto-hint generation
+        # The backend will generate hints if both lists are empty
+
         payload = {
             "messages": messages,
             "repositories": repo_list,
@@ -656,22 +981,54 @@ class NIAApiClient:
             payload = {
                 "query": query,
             }
-
+
             if output_format:
                 payload["output_format"] = output_format
-
+
             response = await self.client.post(
                 f"{self.base_url}/v2/deep-research",
                 json=payload
             )
             response.raise_for_status()
             return response.json()
-
+
         except httpx.HTTPStatusError as e:
             raise self._handle_api_error(e)
         except Exception as e:
             raise APIError(f"Deep research failed: {str(e)}")
-
+
+    async def oracle_research(
+        self,
+        query: str,
+        repositories: Optional[List[str]] = None,
+        data_sources: Optional[List[str]] = None,
+        output_format: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Call the in-house Oracle research agent."""
+        try:
+            payload: Dict[str, Any] = {
+                "query": query,
+            }
+
+            if repositories:
+                payload["repositories"] = repositories
+            if data_sources:
+                payload["data_sources"] = data_sources
+            if output_format:
+                payload["output_format"] = output_format
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/oracle",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Oracle research failed: {str(e)}")
+
     async def get_source_content(
         self,
         source_type: str,
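`oracle_research` only attaches optional fields when they are truthy, so an empty repository list is omitted and scoping is left to the backend. The payload construction, restated standalone:

from typing import Any, Dict, List, Optional

def build_oracle_payload(
    query: str,
    repositories: Optional[List[str]] = None,
    data_sources: Optional[List[str]] = None,
    output_format: Optional[str] = None,
) -> Dict[str, Any]:
    payload: Dict[str, Any] = {"query": query}
    if repositories:                      # truthy check: [] is dropped
        payload["repositories"] = repositories
    if data_sources:
        payload["data_sources"] = data_sources
    if output_format:
        payload["output_format"] = output_format
    return payload

assert build_oracle_payload("q", repositories=[]) == {"query": "q"}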
@@ -723,76 +1080,6 @@ class NIAApiClient:
             raise self._handle_api_error(e)
         except Exception as e:
             raise APIError(f"Failed to submit bug report: {str(e)}")
-
-    async def index_local_filesystem(
-        self,
-        directory_path: str,
-        inclusion_patterns: List[str] = None,
-        exclusion_patterns: List[str] = None,
-        max_file_size_mb: int = 50
-    ) -> Dict[str, Any]:
-        """Index a local filesystem directory."""
-        try:
-            payload = {
-                "directory_path": directory_path,
-                "inclusion_patterns": inclusion_patterns or [],
-                "exclusion_patterns": exclusion_patterns or [],
-                "max_file_size_mb": max_file_size_mb
-            }
-
-            response = await self.client.post(
-                f"{self.base_url}/v2/local-filesystem",
-                json=payload
-            )
-            response.raise_for_status()
-            return response.json()
-
-        except httpx.HTTPStatusError as e:
-            raise self._handle_api_error(e)
-        except Exception as e:
-            raise APIError(f"Failed to index local filesystem: {str(e)}")
-
-    async def scan_local_filesystem(
-        self,
-        directory_path: str,
-        inclusion_patterns: List[str] = None,
-        exclusion_patterns: List[str] = None,
-        max_file_size_mb: int = 50
-    ) -> Dict[str, Any]:
-        """Scan a local filesystem directory to preview what would be indexed."""
-        try:
-            payload = {
-                "directory_path": directory_path,
-                "inclusion_patterns": inclusion_patterns or [],
-                "exclusion_patterns": exclusion_patterns or [],
-                "max_file_size_mb": max_file_size_mb
-            }
-
-            response = await self.client.post(
-                f"{self.base_url}/v2/local-filesystem/scan",
-                json=payload
-            )
-            response.raise_for_status()
-            return response.json()
-
-        except httpx.HTTPStatusError as e:
-            raise self._handle_api_error(e)
-        except Exception as e:
-            raise APIError(f"Failed to scan local filesystem: {str(e)}")
-
-    async def check_local_filesystem_status(self, source_id: str) -> Dict[str, Any]:
-        """Check the indexing status of a local filesystem source."""
-        try:
-            response = await self.client.get(
-                f"{self.base_url}/v2/local-filesystem/{source_id}"
-            )
-            response.raise_for_status()
-            return response.json()
-
-        except httpx.HTTPStatusError as e:
-            raise self._handle_api_error(e)
-        except Exception as e:
-            raise APIError(f"Failed to check local filesystem status: {str(e)}")
 
     # ========================================================================
     # CHROMA PACKAGE SEARCH METHODS
@@ -928,9 +1215,13 @@ class NIAApiClient:
         tags: List[str] = None,
         metadata: Dict[str, Any] = None,
         nia_references: Optional[Dict[str, Any]] = None,
-        edited_files: Optional[List[Dict[str, Any]]] = None
+        edited_files: Optional[List[Dict[str, Any]]] = None,
+        workspace_metadata: Optional[Dict[str, Any]] = None,
+        file_metadata: Optional[Dict[str, Any]] = None,
+        workspace_override: Optional[str] = None,
+        cwd: Optional[str] = None
     ) -> Dict[str, Any]:
-        """Save a conversation context for cross-agent sharing."""
+        """Save a conversation context for cross-agent sharing with workspace awareness."""
         try:
             payload = {
                 "title": title,
@@ -947,6 +1238,16 @@ class NIAApiClient:
             if edited_files is not None:
                 payload["edited_files"] = edited_files
 
+            # Add workspace-aware fields
+            if workspace_metadata is not None:
+                payload["workspace_metadata"] = workspace_metadata
+            if file_metadata is not None:
+                payload["file_metadata"] = file_metadata
+            if workspace_override is not None:
+                payload["workspace_override"] = workspace_override
+            if cwd is not None:
+                payload["cwd"] = cwd
+
             response = await self.client.post(
                 f"{self.base_url}/v2/contexts",
                 json=payload
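The save side uses `is not None` rather than truthiness, so an empty dict is still transmitted; only omitted arguments stay off the wire. Restated standalone:

from typing import Any, Dict, Optional

def attach_workspace_fields(
    payload: Dict[str, Any],
    workspace_metadata: Optional[Dict[str, Any]] = None,
    file_metadata: Optional[Dict[str, Any]] = None,
    workspace_override: Optional[str] = None,
    cwd: Optional[str] = None,
) -> Dict[str, Any]:
    # `is not None`, not truthiness: an empty dict is still sent.
    if workspace_metadata is not None:
        payload["workspace_metadata"] = workspace_metadata
    if file_metadata is not None:
        payload["file_metadata"] = file_metadata
    if workspace_override is not None:
        payload["workspace_override"] = workspace_override
    if cwd is not None:
        payload["cwd"] = cwd
    return payload

assert attach_workspace_fields({"title": "t"}, workspace_metadata={}) == {
    "title": "t",
    "workspace_metadata": {},
}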
@@ -964,9 +1265,14 @@ class NIAApiClient:
         limit: int = 20,
         offset: int = 0,
         tags: Optional[str] = None,
-        agent_source: Optional[str] = None
+        agent_source: Optional[str] = None,
+        scope: Optional[str] = None,
+        workspace: Optional[str] = None,
+        directory: Optional[str] = None,
+        file_overlap: Optional[str] = None,
+        cwd: Optional[str] = None
     ) -> Dict[str, Any]:
-        """List user's conversation contexts with pagination and
+        """List user's conversation contexts with pagination, filtering, and workspace awareness."""
         try:
             params = {
                 "limit": limit,
@@ -978,6 +1284,18 @@ class NIAApiClient:
             if agent_source:
                 params["agent_source"] = agent_source
 
+            # Add workspace-aware filters
+            if scope:
+                params["scope"] = scope
+            if workspace:
+                params["workspace"] = workspace
+            if directory:
+                params["directory"] = directory
+            if file_overlap:
+                params["file_overlap"] = file_overlap
+            if cwd:
+                params["cwd"] = cwd
+
             response = await self.client.get(
                 f"{self.base_url}/v2/contexts",
                 params=params
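By contrast, the list-side filters use truthiness (`if scope:`), so empty strings are silently dropped from the query. A call sketch; the method name `list_contexts` and the `scope` value are assumptions, since the diff shows only the parameters:

import asyncio

from nia_mcp_server.api_client import NIAApiClient

async def main() -> None:
    client = NIAApiClient(api_key="nia-...")   # constructor shape assumed
    contexts = await client.list_contexts(     # method name assumed
        limit=10,
        scope="workspace",                     # accepted values not shown in diff
        workspace="my-app",
        cwd="/home/dev/my-app",
    )
    print(contexts)

asyncio.run(main())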
@@ -1062,7 +1380,7 @@ class NIAApiClient:
         tags: Optional[str] = None,
         agent_source: Optional[str] = None
     ) -> Dict[str, Any]:
-        """Search conversation contexts by content, title, or summary."""
+        """Search conversation contexts by content, title, or summary (keyword search)."""
         try:
             params = {
                 "q": query,
@@ -1084,4 +1402,90 @@ class NIAApiClient:
         except httpx.HTTPStatusError as e:
             raise self._handle_api_error(e)
         except Exception as e:
-            raise APIError(f"Failed to search contexts: {str(e)}")
+            raise APIError(f"Failed to search contexts: {str(e)}")
+
+    async def search_contexts_semantic(
+        self,
+        query: str,
+        limit: int = 20,
+        organization_id: Optional[str] = None,
+        cwd: Optional[str] = None,
+        include_highlights: bool = True,
+        workspace_filter: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Search conversation contexts using semantic search (vector + BM25 hybrid)."""
+        try:
+            params = {
+                "q": query,
+                "limit": limit,
+                "include_highlights": include_highlights
+            }
+
+            if organization_id:
+                params["organization_id"] = organization_id
+            if cwd:
+                params["cwd"] = cwd
+            if workspace_filter:
+                params["workspace_filter"] = workspace_filter
+
+            response = await self.client.get(
+                f"{self.base_url}/v2/contexts/semantic-search",
+                params=params
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            raise APIError(f"Failed to search contexts semantically: {str(e)}")
+
+    # =========================================================================
+    # Universal Search
+    # =========================================================================
+
+    async def universal_search(
+        self,
+        query: str,
+        top_k: int = 20,
+        include_repos: bool = True,
+        include_docs: bool = True,
+        alpha: float = 0.7,
+        compress_output: bool = False
+    ) -> Dict[str, Any]:
+        """
+        Search across ALL indexed public sources using TurboPuffer hybrid search.
+
+        Args:
+            query: Natural language search query
+            top_k: Total number of results to return (default: 20, max: 100)
+            include_repos: Include repository sources (default: True)
+            include_docs: Include documentation sources (default: True)
+            alpha: Weight for vector search vs BM25 (default: 0.7 = 70% vector)
+            compress_output: Use AI (Gemini Flash) to compress results into concise answer
+
+        Returns:
+            Dict with results, sources_searched, query_time_ms, optional errors, and optional answer
+        """
+        try:
+            payload = {
+                "query": query,
+                "top_k": top_k,
+                "include_repos": include_repos,
+                "include_docs": include_docs,
+                "alpha": alpha,
+                "compress_output": compress_output
+            }
+
+            response = await self.client.post(
+                f"{self.base_url}/v2/universal-search",
+                json=payload
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except httpx.HTTPStatusError as e:
+            raise self._handle_api_error(e)
+        except Exception as e:
+            logger.error(f"Universal search failed: {e}")
+            raise APIError(f"Failed to perform universal search: {str(e)}")