jarvis-ai-assistant 0.1.104__py3-none-any.whl → 0.1.106__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of jarvis-ai-assistant might be problematic.

Files changed (62)
  1. jarvis/__init__.py +1 -1
  2. jarvis/agent.py +124 -67
  3. jarvis/jarvis_code_agent/code_agent.py +133 -22
  4. jarvis/jarvis_code_agent/patch.py +4 -7
  5. jarvis/jarvis_code_agent/relevant_files.py +163 -72
  6. jarvis/jarvis_codebase/main.py +36 -15
  7. jarvis/jarvis_lsp/base.py +143 -0
  8. jarvis/jarvis_lsp/cpp.py +134 -0
  9. jarvis/jarvis_lsp/go.py +140 -0
  10. jarvis/jarvis_lsp/python.py +135 -0
  11. jarvis/jarvis_lsp/registry.py +234 -0
  12. jarvis/jarvis_lsp/rust.py +142 -0
  13. jarvis/jarvis_platform/__init__.py +3 -0
  14. jarvis/{models → jarvis_platform}/ai8.py +1 -1
  15. jarvis/{models → jarvis_platform}/kimi.py +1 -1
  16. jarvis/{models → jarvis_platform}/ollama.py +1 -1
  17. jarvis/{models → jarvis_platform}/openai.py +1 -1
  18. jarvis/{models → jarvis_platform}/oyi.py +1 -1
  19. jarvis/{models → jarvis_platform}/registry.py +11 -11
  20. jarvis/{jarvis_platform → jarvis_platform_manager}/main.py +1 -1
  21. jarvis/jarvis_rag/main.py +6 -6
  22. jarvis/jarvis_smart_shell/main.py +3 -3
  23. jarvis/jarvis_tools/__init__.py +0 -0
  24. jarvis/{tools → jarvis_tools}/ask_user.py +1 -1
  25. jarvis/{tools → jarvis_tools}/code_review.py +34 -8
  26. jarvis/jarvis_tools/create_code_agent.py +115 -0
  27. jarvis/{tools → jarvis_tools}/create_sub_agent.py +1 -1
  28. jarvis/jarvis_tools/deep_thinking.py +160 -0
  29. jarvis/jarvis_tools/deep_thinking_agent.py +146 -0
  30. jarvis/{tools → jarvis_tools}/git_commiter.py +2 -2
  31. jarvis/jarvis_tools/lsp_find_definition.py +134 -0
  32. jarvis/jarvis_tools/lsp_find_references.py +111 -0
  33. jarvis/jarvis_tools/lsp_get_diagnostics.py +121 -0
  34. jarvis/jarvis_tools/lsp_get_document_symbols.py +87 -0
  35. jarvis/jarvis_tools/lsp_prepare_rename.py +130 -0
  36. jarvis/jarvis_tools/lsp_validate_edit.py +141 -0
  37. jarvis/{tools → jarvis_tools}/methodology.py +6 -1
  38. jarvis/{tools → jarvis_tools}/registry.py +6 -5
  39. jarvis/{tools → jarvis_tools}/search.py +2 -2
  40. jarvis/utils.py +68 -25
  41. {jarvis_ai_assistant-0.1.104.dist-info → jarvis_ai_assistant-0.1.106.dist-info}/METADATA +23 -16
  42. jarvis_ai_assistant-0.1.106.dist-info/RECORD +62 -0
  43. {jarvis_ai_assistant-0.1.104.dist-info → jarvis_ai_assistant-0.1.106.dist-info}/entry_points.txt +3 -4
  44. jarvis/models/__init__.py +0 -3
  45. jarvis/tools/create_code_test_agent.py +0 -115
  46. jarvis/tools/create_ctags_agent.py +0 -164
  47. jarvis/tools/find_in_codebase.py +0 -78
  48. jarvis_ai_assistant-0.1.104.dist-info/RECORD +0 -50
  49. /jarvis/{models → jarvis_platform}/base.py +0 -0
  50. /jarvis/{tools → jarvis_platform_manager}/__init__.py +0 -0
  51. /jarvis/{tools → jarvis_tools}/ask_codebase.py +0 -0
  52. /jarvis/{tools → jarvis_tools}/base.py +0 -0
  53. /jarvis/{tools → jarvis_tools}/chdir.py +0 -0
  54. /jarvis/{tools → jarvis_tools}/execute_shell.py +0 -0
  55. /jarvis/{tools → jarvis_tools}/file_operation.py +0 -0
  56. /jarvis/{tools → jarvis_tools}/rag.py +0 -0
  57. /jarvis/{tools → jarvis_tools}/read_code.py +0 -0
  58. /jarvis/{tools → jarvis_tools}/read_webpage.py +0 -0
  59. /jarvis/{tools → jarvis_tools}/select_code_files.py +0 -0
  60. {jarvis_ai_assistant-0.1.104.dist-info → jarvis_ai_assistant-0.1.106.dist-info}/LICENSE +0 -0
  61. {jarvis_ai_assistant-0.1.104.dist-info → jarvis_ai_assistant-0.1.106.dist-info}/WHEEL +0 -0
  62. {jarvis_ai_assistant-0.1.104.dist-info → jarvis_ai_assistant-0.1.106.dist-info}/top_level.txt +0 -0
jarvis/jarvis_code_agent/relevant_files.py

@@ -6,11 +6,170 @@ import yaml
 from jarvis.agent import Agent
 from jarvis.jarvis_code_agent.file_select import select_files
 from jarvis.jarvis_codebase.main import CodeBase
-from jarvis.models.registry import PlatformRegistry
-from jarvis.tools.registry import ToolRegistry
+from jarvis.jarvis_platform.registry import PlatformRegistry
+from jarvis.jarvis_tools.registry import ToolRegistry
 from jarvis.utils import OutputType, PrettyOutput, is_disable_codebase
 
 
+def find_relevant_files_from_agent(user_input: str, files_from_codebase: List[str]) -> List[str]:
+    find_file_tool_registry = ToolRegistry()
+    find_file_tool_registry.use_tools(["read_code",
+                                       "execute_shell",
+                                       "lsp_get_document_symbols",
+                                       "lsp_get_diagnostics",
+                                       "lsp_find_references",
+                                       "lsp_find_definition",
+                                       "lsp_prepare_rename",
+                                       "lsp_validate_edit"])
+    find_file_agent = Agent(
+        system_prompt="""You are a file agent, you are responsible for finding files related to the user's requirement.
+
+THINKING PROCESS:
+1. Initial File Verification
+```
+Thought: Let me examine the suggested files...
+Action: For each suggested file:
+- Use read_code to check content
+- Use LSP tools to analyze structure
+Observation: Found that...
+
+Thought: Evaluate actual relevance...
+Action: For each file:
+- Check direct relationship to requirement
+- Verify functionality matches
+- Look for clear evidence of relevance
+Observation: After analysis:
+- Relevant files: [list with reasons]
+- Removed files: [list with reasons]
+
+Thought: Verify removal decisions...
+Action: Double-check each removed file
+Observation: Removal justification:
+- File X: [specific reason for removal]
+- File Y: [specific reason for removal]
+```
+
+2. Additional File Search
+```
+Thought: Plan search strategy for missing aspects...
+Action: Use combination of tools:
+- git grep for key terms
+- LSP tools for references
+- Dependency analysis
+Observation: Found additional files...
+
+Thought: Validate new files...
+Action: For each new file:
+- Verify direct relevance
+- Check for false positives
+- Document clear evidence
+Observation: After validation:
+- Confirmed relevant: [list with evidence]
+- Excluded: [list with reasons]
+```
+
+3. Comprehensive Analysis
+```
+Thought: Final relevance check...
+Action: For each remaining file:
+- Verify essential to requirement
+- Check for indirect inclusions
+- Validate necessity
+Observation: Final cleanup:
+- Core files: [list with roles]
+- Removed borderline cases: [list with reasons]
+
+Thought: Ensure minimal complete set...
+Action: Review final file list
+Observation: Confirmed each file is:
+- Directly relevant
+- Essential for requirement
+- Supported by evidence
+```
+
+FILE READING GUIDELINES:
+1. For Large Files (>200 lines):
+```
+Thought: This file is large, need targeted reading...
+Action:
+- First: execute_shell("grep -n 'key_term' path/to/file")
+- Then: read_code("path/to/file", start_line=x-10, end_line=x+20)
+Observation: Relevance analysis:
+- Relevant sections: [details]
+- Irrelevant sections: [reasons to ignore]
+```
+
+2. For Small Files:
+```
+Thought: This is a small file, can read entirely...
+Action: read_code("path/to/file")
+Observation: Relevance analysis:
+- Key evidence: [details]
+- Irrelevant aspects: [what to ignore]
+```
+
+VERIFICATION RULES:
+- Remove files without clear relevance evidence
+- Exclude files with only tangential relationships
+- Delete files that only contain imports/references
+- Remove files if relevance is uncertain
+- Document specific reasons for each removal
+- Keep only files essential to requirement
+- Maintain minimal complete set
+
+OUTPUT FORMAT:
+<FILE_PATH>
+- file_path1 # KEEP: [specific evidence of relevance]
+- file_path2 # KEEP: [clear relationship to requirement]
+</FILE_PATH>
+""",
+        name="FindFileAgent",
+        is_sub_agent=True,
+        tool_registry=find_file_tool_registry,
+        platform=PlatformRegistry().get_normal_platform(),
+        auto_complete=True,
+        summary_prompt="""Please provide only the verified essential files with evidence:
+<FILE_PATH>
+- file_path1 # KEEP: [concrete evidence of necessity]
+- file_path2 # KEEP: [specific relevance proof]
+</FILE_PATH>
+""")
+
+    prompt = f"Find files related to: '{user_input}'\n"
+    if files_from_codebase:
+        prompt += f"""
+Potentially related files: {files_from_codebase}
+
+ANALYSIS REQUIRED:
+1. Verify each suggested file:
+- Document relevance evidence
+- Identify actual relationships
+- Note any missing aspects
+
+2. Search for additional files:
+- Fill coverage gaps
+- Find related components
+- Locate test files
+
+3. Provide reasoning:
+- Explain why each file is included
+- Document verification process
+- Note any uncertainties
+"""
+    output = find_file_agent.run(prompt)
+
+    rsp_from_agent = re.findall(r'<FILE_PATH>(.*?)</FILE_PATH>', output, re.DOTALL)
+    files_from_agent = []
+    if rsp_from_agent:
+        try:
+            files_from_agent = yaml.safe_load(rsp_from_agent[0])
+        except Exception as e:
+            files_from_agent = []
+    else:
+        files_from_agent = []
+    return files_from_agent
+
+
 def find_relevant_files(user_input: str, root_dir: str) -> List[str]:
     try:
         files_from_codebase = []
@@ -20,76 +179,8 @@ def find_relevant_files(user_input: str, root_dir: str) -> List[str]:
             files_from_codebase = codebase.search_similar(user_input)
 
         PrettyOutput.print("Find files by agent...", OutputType.INFO)
-        find_file_tool_registry = ToolRegistry()
-        find_file_tool_registry.use_tools(["read_code", "execute_shell"])
-        find_file_agent = Agent(
-            system_prompt="""You are a file agent, you are responsible for finding files related to the user's requirement.
-
-SEARCH STRATEGY:
-1. First Pass - Quick Search:
-- Use `execute_shell` with git grep/find to locate potential files
-- Search for key terms, function names, and relevant patterns
-- Example: execute_shell("git grep -l 'search_term'")
-
-2. Content Analysis:
-- For each potential file, analyze its content
-- Follow the file reading guidelines for large files
-- Look for:
-* Direct matches to requirement terms
-* Related functionality
-* Imported/referenced files
-* Test files for modified code
-
-FILE READING GUIDELINES:
-1. For Large Files (>200 lines):
-- Do NOT read the entire file at once
-- First use grep/ctags to locate relevant sections
-- Then read specific sections with context
-- Example:
-* execute_shell("grep -n 'function_name' path/to/file")
-* read_code("path/to/file", start_line=found_line-10, end_line=found_line+20)
-
-2. For Small Files:
-- Can read entire file directly
-
-IMPORTANT RULES:
-- Only return files that are DIRECTLY related to the requirement
-- Exclude false positives and loosely related files
-- If a file only contains imports/references, don't include it
-- Include both implementation and test files when relevant
-- If unsure about a file, use grep/read_code to verify relevance
-- Return empty list if no truly relevant files are found
-- Do NOT modify any code, only find files
-
-OUTPUT FORMAT:
-- Only provide file paths in the specified YAML format
-- No additional explanations or comments
-""",
-            name="FindFileAgent",
-            is_sub_agent=True,
-            tool_registry=find_file_tool_registry,
-            platform=PlatformRegistry().get_normal_platform(),
-            auto_complete=True,
-            summary_prompt="""Please provide the file paths as YAML list:
-<FILE_PATH>
-- file_path1
-- file_path2
-</FILE_PATH>
-""")
-        prompt = f"Find files related about '{user_input}'\n"
-        if files_from_codebase:
-            prompt += f"\n\nFiles maybe related: {files_from_codebase}\n\n Please read above files first"
-        output = find_file_agent.run(prompt)
-
-        rsp_from_agent = re.findall(r'<FILE_PATH>(.*?)</FILE_PATH>', output, re.DOTALL)
-        files_from_agent = []
-        if rsp_from_agent:
-            try:
-                files_from_agent = yaml.safe_load(rsp_from_agent[0])
-            except Exception as e:
-                files_from_agent = []
-        else:
-            files_from_agent = []
+
+        files_from_agent = find_relevant_files_from_agent(user_input, files_from_codebase)
 
         selected_files = select_files(files_from_agent, os.getcwd())
         return selected_files
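
The net effect of these two hunks is that `find_relevant_files` now delegates candidate filtering to the new `find_relevant_files_from_agent` helper, which re-reads each candidate with the LSP-backed tools and returns only the paths it can justify. A minimal usage sketch, assuming only the signature shown above; the requirement string and candidate paths are illustrative placeholders:

    # Illustrative sketch only; mirrors the call pattern introduced in this hunk.
    from jarvis.jarvis_code_agent.relevant_files import find_relevant_files_from_agent

    user_input = "add retry logic to the HTTP client"      # hypothetical requirement
    candidates = ["src/http_client.py", "src/retry.py"]    # e.g. output of CodeBase.search_similar()

    # The agent verifies each candidate, may add files it finds via git grep / LSP lookups,
    # and returns the list parsed from the <FILE_PATH> YAML block in its final reply.
    relevant = find_relevant_files_from_agent(user_input, candidates)
    for path in relevant:
        print(path)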
jarvis/jarvis_codebase/main.py

@@ -4,12 +4,10 @@ import numpy as np
 import faiss
 from typing import List, Tuple, Optional, Dict
 
-import yaml
-from jarvis.models.registry import PlatformRegistry
+from jarvis.jarvis_platform.registry import PlatformRegistry
 import concurrent.futures
-from threading import Lock
 from concurrent.futures import ThreadPoolExecutor
-from jarvis.utils import OutputType, PrettyOutput, find_git_root, get_file_md5, get_max_context_length, get_single_line_input, get_thread_count, load_embedding_model, load_rerank_model, user_confirm
+from jarvis.utils import OutputType, PrettyOutput, find_git_root, get_file_md5, get_max_context_length, get_thread_count, load_embedding_model, user_confirm
 from jarvis.utils import init_env
 import argparse
 import pickle
@@ -62,12 +60,11 @@ class CodeBase:
         return [f for f in files if not f.startswith(".jarvis")]
 
     def is_text_file(self, file_path: str):
-        with open(file_path, "r", encoding="utf-8") as f:
-            try:
-                f.read()
-                return True
-            except UnicodeDecodeError:
-                return False
+        try:
+            open(file_path, "r", encoding="utf-8").read()
+            return True
+        except Exception:
+            return False
 
     def make_description(self, file_path: str, content: str) -> str:
         model = PlatformRegistry.get_global_platform_registry().get_cheap_platform()
@@ -386,6 +383,24 @@ Content: {content}
             force: Whether to force rebuild the index, without asking the user
         """
        try:
+            # Clean up cache for non-existent files
+            files_to_delete = []
+            for cached_file in list(self.vector_cache.keys()):
+                if not os.path.exists(cached_file) or not self.is_text_file(cached_file):
+                    files_to_delete.append(cached_file)
+                    cache_path = self._get_cache_path(cached_file)
+                    try:
+                        os.remove(cache_path)
+                    except Exception as e:
+                        PrettyOutput.print(f"Failed to delete cache file for {cached_file}: {str(e)}",
+                                           output_type=OutputType.WARNING)
+
+            if files_to_delete:
+                for file_path in files_to_delete:
+                    del self.vector_cache[file_path]
+                PrettyOutput.print(f"Cleaned cache for {len(files_to_delete)} non-existent files",
+                                   output_type=OutputType.INFO)
+
            # Update the git file list
            self.git_file_list = self.get_git_file_list()
 
@@ -642,7 +657,7 @@ Note: Only include files that have a strong connection to the query."""
             List[str]: The query variants list
         """
         model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
-        prompt = f"""Please generate 3 different expressions based on the following query, each expression should fully convey the meaning of the original query. These expressions will be used for code search, maintain professionalism and accuracy.
+        prompt = f"""Please generate 3 different expressions based on the following query in English, each expression should fully convey the meaning of the original query. These expressions will be used for code search, maintain professionalism and accuracy.
 Original query: {query}
 
 Please output 3 expressions directly, separated by two line breaks, without numbering or other markers.
@@ -722,19 +737,23 @@ Please output 3 expressions directly, separated by two line breaks, without numb
 
     def ask_codebase(self, query: str, top_k: int=20) -> str:
         """Query the codebase"""
-        results = self.search_similar(query, top_k)
-        if not results:
+        reuslts_from_codebase = self.search_similar(query, top_k)
+
+        from jarvis.jarvis_code_agent.relevant_files import find_relevant_files_from_agent
+        results_from_agent = find_relevant_files_from_agent(query, reuslts_from_codebase)
+
+        if not results_from_agent:
             PrettyOutput.print("No related files found", output_type=OutputType.WARNING)
             return ""
 
         message = "Found related files:\n"
-        for path in results:
+        for path in results_from_agent:
             message += f"File: {path}\n"
         PrettyOutput.print(message.rstrip(), output_type=OutputType.SUCCESS, lang="markdown")
 
         prompt = f"""You are a code expert, please answer the user's question based on the following file information:
 """
-        for path in results:
+        for path in results_from_agent:
             try:
                 if len(prompt) > self.max_context_length:
                     PrettyOutput.print(f"Avoid context overflow, discard low-related file: {path}", OutputType.WARNING)
@@ -755,6 +774,8 @@ File content:
 User question: {query}
 
 Please answer the user's question in Chinese using professional language. If the provided file content is insufficient to answer the user's question, please inform the user. Never make up information.
+
+Add reference files and code snippets at the end of the answer.
 """
         model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
         response = model.chat_until_success(prompt)
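
With the `ask_codebase` change above, vector-search hits are now passed through the same agent-based filter before the answer prompt is built. A rough sketch of the resulting flow; the `CodeBase(...)` constructor call and query text are assumptions for illustration, not taken from this diff:

    # Illustrative sketch only; shows the post-change ask_codebase flow at a high level.
    from jarvis.jarvis_codebase.main import CodeBase
    from jarvis.utils import find_git_root

    codebase = CodeBase(find_git_root())   # assumed constructor shape, not shown in this diff
    # Internally: search_similar() proposes candidates, find_relevant_files_from_agent()
    # filters them, and the surviving files are inlined into the prompt for the codegen platform.
    answer = codebase.ask_codebase("Where is the vector cache cleaned up?", top_k=20)
    print(answer)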
jarvis/jarvis_lsp/base.py (new file)

@@ -0,0 +1,143 @@
+from abc import ABC, abstractmethod
+from typing import List, Dict, Optional, Tuple, Any
+
+class BaseLSP(ABC):
+    """Base class for Language Server Protocol integration.
+
+    Core LSP features needed for LLM-based code editing:
+    1. Code navigation and analysis
+    2. Code modification validation
+    3. Diagnostic information
+    4. Symbol analysis
+    """
+
+    language: str = ""  # Language identifier, should be overridden by subclasses
+
+    @abstractmethod
+    def initialize(self, workspace_path: str) -> bool:
+        """Initialize LSP server for the workspace.
+
+        Args:
+            workspace_path: Root path of the workspace
+
+        Returns:
+            bool: True if initialization successful
+        """
+        return False
+
+    @abstractmethod
+    def find_references(self, file_path: str, position: Tuple[int, int]) -> List[Dict[str, Any]]:
+        """Find all references of symbol at position.
+
+        Args:
+            file_path: Path to the file
+            position: (line, character) tuple
+
+        Returns:
+            List of references with location info:
+            [
+                {
+                    "uri": "file path",
+                    "range": {
+                        "start": {"line": int, "character": int},
+                        "end": {"line": int, "character": int}
+                    }
+                }
+            ]
+        """
+        return []
+
+    @abstractmethod
+    def find_definition(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        """Find definition of symbol at position.
+
+        Args:
+            file_path: Path to the file
+            position: (line, character) tuple
+
+        Returns:
+            Location of definition:
+            {
+                "uri": "file path",
+                "range": {
+                    "start": {"line": int, "character": int},
+                    "end": {"line": int, "character": int}
+                }
+            }
+        """
+        return None
+
+    @abstractmethod
+    def get_document_symbols(self, file_path: str) -> List[Dict[str, Any]]:
+        """Get all symbols in document.
+
+        Args:
+            file_path: Path to the file
+
+        Returns:
+            List of symbols with their locations and types
+        """
+        return []
+
+    @abstractmethod
+    def get_diagnostics(self, file_path: str) -> List[Dict[str, Any]]:
+        """Get diagnostics (errors, warnings) for file.
+
+        Args:
+            file_path: Path to the file
+
+        Returns:
+            List of diagnostic items:
+            [
+                {
+                    "range": {
+                        "start": {"line": int, "character": int},
+                        "end": {"line": int, "character": int}
+                    },
+                    "severity": 1 | 2 | 3 | 4,  # Error=1, Warning=2, Info=3, Hint=4
+                    "code": str,  # Error code if any
+                    "source": str,  # Source of diagnostic (e.g. "pylint")
+                    "message": str,  # Diagnostic message
+                    "relatedInformation": [  # Optional related info
+                        {
+                            "location": {
+                                "uri": str,
+                                "range": {...}
+                            },
+                            "message": str
+                        }
+                    ]
+                }
+            ]
+        """
+        return []
+
+    @abstractmethod
+    def prepare_rename(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        """Check if symbol at position can be renamed.
+
+        Args:
+            file_path: Path to the file
+            position: Symbol position
+
+        Returns:
+            Range that would be renamed or None if rename not allowed
+        """
+        return None
+
+    @abstractmethod
+    def validate_edit(self, file_path: str, edit: Dict[str, Any]) -> bool:
+        """Validate if proposed edit is syntactically correct.
+
+        Args:
+            file_path: Path to the file
+            edit: Edit operation in LSP format
+
+        Returns:
+            bool: True if edit is valid
+        """
+        return False
+
+    def shutdown(self):
+        """Shutdown LSP server cleanly."""
+        pass
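
This abstract interface is what the new `lsp_*` tools in jarvis/jarvis_tools program against. For orientation, a minimal do-nothing subclass that satisfies the contract; it is purely illustrative and not part of the package, whose real backends live under jarvis/jarvis_lsp/:

    # Illustrative sketch only: a no-op backend implementing the BaseLSP contract.
    from typing import Any, Dict, List, Optional, Tuple
    from jarvis.jarvis_lsp.base import BaseLSP

    class NullLSP(BaseLSP):          # hypothetical class for illustration
        language = "text"

        def initialize(self, workspace_path: str) -> bool:
            return True              # pretend the server started

        def find_references(self, file_path: str, position: Tuple[int, int]) -> List[Dict[str, Any]]:
            return []                # no references known

        def find_definition(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
            return None              # definition lookup unsupported

        def get_document_symbols(self, file_path: str) -> List[Dict[str, Any]]:
            return []

        def get_diagnostics(self, file_path: str) -> List[Dict[str, Any]]:
            return []                # report a clean file

        def prepare_rename(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
            return None              # renames not allowed

        def validate_edit(self, file_path: str, edit: Dict[str, Any]) -> bool:
            return True              # accept every edit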
jarvis/jarvis_lsp/cpp.py (new file)

@@ -0,0 +1,134 @@
+import os
+import shutil
+import subprocess
+from typing import List, Dict, Optional, Tuple, Any
+import json
+from jarvis.jarvis_lsp.base import BaseLSP
+from jarvis.utils import PrettyOutput, OutputType
+
+class CPPLSP(BaseLSP):
+    """C++ LSP implementation using clangd."""
+
+    language = "cpp"
+
+    @staticmethod
+    def check() -> bool:
+        """Check if clangd is installed."""
+        return shutil.which("clangd") is not None
+
+    def __init__(self):
+        self.workspace_path = ""
+        self.clangd_process = None
+        self.request_id = 0
+
+    def initialize(self, workspace_path: str) -> bool:
+        try:
+            self.workspace_path = workspace_path
+            # Start clangd process
+            self.clangd_process = subprocess.Popen(
+                ["clangd", "--background-index"],
+                stdin=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+
+            # Send initialize request
+            self._send_request("initialize", {
+                "processId": os.getpid(),
+                "rootUri": f"file://{workspace_path}",
+                "capabilities": {}
+            })
+
+            return True
+        except Exception as e:
+            PrettyOutput.print(f"C++ LSP initialization failed: {str(e)}", OutputType.ERROR)
+            return False
+
+    def _send_request(self, method: str, params: Dict) -> Optional[Dict]:
+        """Send JSON-RPC request to clangd."""
+        if not self.clangd_process:
+            return None
+
+        try:
+            self.request_id += 1
+            request = {
+                "jsonrpc": "2.0",
+                "id": self.request_id,
+                "method": method,
+                "params": params
+            }
+
+            self.clangd_process.stdin.write(json.dumps(request).encode() + b"\n")  # type: ignore
+            self.clangd_process.stdin.flush()  # type: ignore
+
+            response = json.loads(self.clangd_process.stdout.readline().decode())  # type: ignore
+            return response.get("result")
+        except Exception:
+            return None
+
+    def find_references(self, file_path: str, position: Tuple[int, int]) -> List[Dict[str, Any]]:
+        result = self._send_request("textDocument/references", {
+            "textDocument": {"uri": f"file://{file_path}"},
+            "position": {"line": position[0], "character": position[1]},
+            "context": {"includeDeclaration": True}
+        })
+        return result or []  # type: ignore
+
+    def find_definition(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        result = self._send_request("textDocument/definition", {
+            "textDocument": {"uri": f"file://{file_path}"},
+            "position": {"line": position[0], "character": position[1]}
+        })
+        return result[0] if result else None
+
+    def get_document_symbols(self, file_path: str) -> List[Dict[str, Any]]:
+        result = self._send_request("textDocument/documentSymbol", {
+            "textDocument": {"uri": f"file://{file_path}"}
+        })
+        return result or []  # type: ignore
+
+    def get_diagnostics(self, file_path: str) -> List[Dict[str, Any]]:
+        # Send didOpen notification to trigger diagnostics
+        self._send_request("textDocument/didOpen", {
+            "textDocument": {
+                "uri": f"file://{file_path}",
+                "languageId": "cpp",
+                "version": 1,
+                "text": open(file_path).read()
+            }
+        })
+
+        # Wait for diagnostic notification
+        try:
+            response = json.loads(self.clangd_process.stdout.readline().decode())  # type: ignore
+            if response.get("method") == "textDocument/publishDiagnostics":
+                return response.get("params", {}).get("diagnostics", [])
+        except Exception:
+            pass
+        return []
+
+    def prepare_rename(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        result = self._send_request("textDocument/prepareRename", {
+            "textDocument": {"uri": f"file://{file_path}"},
+            "position": {"line": position[0], "character": position[1]}
+        })
+        return result
+
+    def validate_edit(self, file_path: str, edit: Dict[str, Any]) -> bool:
+        # Send workspace/willRenameFiles request to check validity
+        result = self._send_request("workspace/willRenameFiles", {
+            "files": [{
+                "oldUri": f"file://{file_path}",
+                "newUri": f"file://{file_path}.tmp"
+            }]
+        })
+        return bool(result)
+
+    def shutdown(self):
+        if self.clangd_process:
+            try:
+                self._send_request("shutdown", {})
+                self.clangd_process.terminate()
+                self.clangd_process = None
+            except Exception:
+                pass
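
A usage sketch for the clangd backend above, assuming clangd is installed and on PATH and that the backend responds as the class expects; the workspace path, source file, and (line, character) position are placeholders:

    # Illustrative sketch only: exercising the CPPLSP API shown in this file.
    from jarvis.jarvis_lsp.cpp import CPPLSP

    if CPPLSP.check():                                       # clangd available?
        lsp = CPPLSP()
        if lsp.initialize("/path/to/workspace"):             # placeholder workspace root
            src = "/path/to/workspace/src/main.cpp"          # placeholder file
            definition = lsp.find_definition(src, (41, 12))  # 0-based (line, character)
            diagnostics = lsp.get_diagnostics(src)
            print(definition, len(diagnostics))
        lsp.shutdown()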