jarvis-ai-assistant 0.1.103__py3-none-any.whl → 0.1.105__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of jarvis-ai-assistant might be problematic.
- jarvis/__init__.py +1 -1
- jarvis/agent.py +124 -67
- jarvis/jarvis_code_agent/code_agent.py +133 -22
- jarvis/jarvis_code_agent/file_select.py +4 -6
- jarvis/jarvis_code_agent/patch.py +6 -7
- jarvis/jarvis_code_agent/relevant_files.py +163 -41
- jarvis/jarvis_codebase/main.py +43 -29
- jarvis/jarvis_lsp/base.py +143 -0
- jarvis/jarvis_lsp/cpp.py +134 -0
- jarvis/jarvis_lsp/go.py +140 -0
- jarvis/jarvis_lsp/python.py +135 -0
- jarvis/jarvis_lsp/registry.py +234 -0
- jarvis/jarvis_lsp/rust.py +142 -0
- jarvis/jarvis_platform/__init__.py +3 -0
- jarvis/{models → jarvis_platform}/ai8.py +1 -1
- jarvis/{models → jarvis_platform}/kimi.py +1 -1
- jarvis/{models → jarvis_platform}/ollama.py +1 -1
- jarvis/{models → jarvis_platform}/openai.py +1 -1
- jarvis/{models → jarvis_platform}/oyi.py +1 -1
- jarvis/{models → jarvis_platform}/registry.py +11 -11
- jarvis/{jarvis_platform → jarvis_platform_manager}/main.py +2 -2
- jarvis/jarvis_rag/main.py +8 -8
- jarvis/jarvis_smart_shell/main.py +3 -3
- jarvis/jarvis_tools/__init__.py +0 -0
- jarvis/{tools → jarvis_tools}/ask_codebase.py +1 -4
- jarvis/{tools → jarvis_tools}/ask_user.py +1 -1
- jarvis/{tools → jarvis_tools}/chdir.py +2 -37
- jarvis/jarvis_tools/code_review.py +236 -0
- jarvis/jarvis_tools/create_code_agent.py +115 -0
- jarvis/{tools → jarvis_tools}/create_sub_agent.py +1 -1
- jarvis/jarvis_tools/deep_thinking.py +160 -0
- jarvis/jarvis_tools/deep_thinking_agent.py +146 -0
- jarvis/{tools → jarvis_tools}/git_commiter.py +3 -3
- jarvis/jarvis_tools/lsp_find_definition.py +134 -0
- jarvis/jarvis_tools/lsp_find_references.py +111 -0
- jarvis/jarvis_tools/lsp_get_diagnostics.py +121 -0
- jarvis/jarvis_tools/lsp_get_document_symbols.py +87 -0
- jarvis/jarvis_tools/lsp_prepare_rename.py +130 -0
- jarvis/jarvis_tools/lsp_validate_edit.py +141 -0
- jarvis/{tools → jarvis_tools}/methodology.py +6 -1
- jarvis/{tools → jarvis_tools}/rag.py +1 -1
- jarvis/{tools → jarvis_tools}/read_code.py +0 -31
- jarvis/{tools → jarvis_tools}/registry.py +6 -5
- jarvis/{tools → jarvis_tools}/search.py +2 -2
- jarvis/utils.py +71 -28
- {jarvis_ai_assistant-0.1.103.dist-info → jarvis_ai_assistant-0.1.105.dist-info}/METADATA +98 -62
- jarvis_ai_assistant-0.1.105.dist-info/RECORD +62 -0
- {jarvis_ai_assistant-0.1.103.dist-info → jarvis_ai_assistant-0.1.105.dist-info}/entry_points.txt +4 -4
- jarvis/models/__init__.py +0 -3
- jarvis/tools/code_review.py +0 -163
- jarvis/tools/create_code_sub_agent.py +0 -30
- jarvis/tools/create_code_test_agent.py +0 -115
- jarvis/tools/create_ctags_agent.py +0 -176
- jarvis/tools/find_in_codebase.py +0 -108
- jarvis_ai_assistant-0.1.103.dist-info/RECORD +0 -51
- /jarvis/{models → jarvis_platform}/base.py +0 -0
- /jarvis/{tools → jarvis_platform_manager}/__init__.py +0 -0
- /jarvis/{tools → jarvis_tools}/base.py +0 -0
- /jarvis/{tools → jarvis_tools}/execute_shell.py +0 -0
- /jarvis/{tools → jarvis_tools}/file_operation.py +0 -0
- /jarvis/{tools → jarvis_tools}/read_webpage.py +0 -0
- /jarvis/{tools → jarvis_tools}/select_code_files.py +0 -0
- {jarvis_ai_assistant-0.1.103.dist-info → jarvis_ai_assistant-0.1.105.dist-info}/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.103.dist-info → jarvis_ai_assistant-0.1.105.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.103.dist-info → jarvis_ai_assistant-0.1.105.dist-info}/top_level.txt +0 -0
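Most of the churn between 0.1.103 and 0.1.105 is a package-layout rename: jarvis.models moves to jarvis.jarvis_platform, jarvis.tools moves to jarvis.jarvis_tools, and a new jarvis.jarvis_lsp package is added. For code that imported these modules directly, the change looks roughly like the sketch below (illustrative imports only, mirroring the registry moves listed above):

    # Imports against 0.1.103 (old layout)
    from jarvis.models.registry import PlatformRegistry
    from jarvis.tools.registry import ToolRegistry

    # Equivalent imports against 0.1.105 (new layout)
    from jarvis.jarvis_platform.registry import PlatformRegistry
    from jarvis.jarvis_tools.registry import ToolRegistry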
jarvis/jarvis_code_agent/relevant_files.py
CHANGED

@@ -1,6 +1,3 @@
-
-
-
 import os
 import re
 from typing import List
@@ -9,11 +6,170 @@ import yaml
 from jarvis.agent import Agent
 from jarvis.jarvis_code_agent.file_select import select_files
 from jarvis.jarvis_codebase.main import CodeBase
-from jarvis.
-from jarvis.
+from jarvis.jarvis_platform.registry import PlatformRegistry
+from jarvis.jarvis_tools.registry import ToolRegistry
 from jarvis.utils import OutputType, PrettyOutput, is_disable_codebase


+def find_relevant_files_from_agent(user_input: str, files_from_codebase: List[str]) -> List[str]:
+    find_file_tool_registry = ToolRegistry()
+    find_file_tool_registry.use_tools(["read_code",
+                                       "execute_shell",
+                                       "lsp_get_document_symbols",
+                                       "lsp_get_diagnostics",
+                                       "lsp_find_references",
+                                       "lsp_find_definition",
+                                       "lsp_prepare_rename",
+                                       "lsp_validate_edit"])
+    find_file_agent = Agent(
+        system_prompt="""You are a file agent, you are responsible for finding files related to the user's requirement.
+
+THINKING PROCESS:
+1. Initial File Verification
+```
+Thought: Let me examine the suggested files...
+Action: For each suggested file:
+- Use read_code to check content
+- Use LSP tools to analyze structure
+Observation: Found that...
+
+Thought: Evaluate actual relevance...
+Action: For each file:
+- Check direct relationship to requirement
+- Verify functionality matches
+- Look for clear evidence of relevance
+Observation: After analysis:
+- Relevant files: [list with reasons]
+- Removed files: [list with reasons]
+
+Thought: Verify removal decisions...
+Action: Double-check each removed file
+Observation: Removal justification:
+- File X: [specific reason for removal]
+- File Y: [specific reason for removal]
+```
+
+2. Additional File Search
+```
+Thought: Plan search strategy for missing aspects...
+Action: Use combination of tools:
+- git grep for key terms
+- LSP tools for references
+- Dependency analysis
+Observation: Found additional files...
+
+Thought: Validate new files...
+Action: For each new file:
+- Verify direct relevance
+- Check for false positives
+- Document clear evidence
+Observation: After validation:
+- Confirmed relevant: [list with evidence]
+- Excluded: [list with reasons]
+```
+
+3. Comprehensive Analysis
+```
+Thought: Final relevance check...
+Action: For each remaining file:
+- Verify essential to requirement
+- Check for indirect inclusions
+- Validate necessity
+Observation: Final cleanup:
+- Core files: [list with roles]
+- Removed borderline cases: [list with reasons]
+
+Thought: Ensure minimal complete set...
+Action: Review final file list
+Observation: Confirmed each file is:
+- Directly relevant
+- Essential for requirement
+- Supported by evidence
+```
+
+FILE READING GUIDELINES:
+1. For Large Files (>200 lines):
+```
+Thought: This file is large, need targeted reading...
+Action:
+- First: execute_shell("grep -n 'key_term' path/to/file")
+- Then: read_code("path/to/file", start_line=x-10, end_line=x+20)
+Observation: Relevance analysis:
+- Relevant sections: [details]
+- Irrelevant sections: [reasons to ignore]
+```
+
+2. For Small Files:
+```
+Thought: This is a small file, can read entirely...
+Action: read_code("path/to/file")
+Observation: Relevance analysis:
+- Key evidence: [details]
+- Irrelevant aspects: [what to ignore]
+```
+
+VERIFICATION RULES:
+- Remove files without clear relevance evidence
+- Exclude files with only tangential relationships
+- Delete files that only contain imports/references
+- Remove files if relevance is uncertain
+- Document specific reasons for each removal
+- Keep only files essential to requirement
+- Maintain minimal complete set
+
+OUTPUT FORMAT:
+<FILE_PATH>
+- file_path1 # KEEP: [specific evidence of relevance]
+- file_path2 # KEEP: [clear relationship to requirement]
+</FILE_PATH>
+""",
+        name="FindFileAgent",
+        is_sub_agent=True,
+        tool_registry=find_file_tool_registry,
+        platform=PlatformRegistry().get_normal_platform(),
+        auto_complete=True,
+        summary_prompt="""Please provide only the verified essential files with evidence:
+<FILE_PATH>
+- file_path1 # KEEP: [concrete evidence of necessity]
+- file_path2 # KEEP: [specific relevance proof]
+</FILE_PATH>
+""")
+
+    prompt = f"Find files related to: '{user_input}'\n"
+    if files_from_codebase:
+        prompt += f"""
+Potentially related files: {files_from_codebase}
+
+ANALYSIS REQUIRED:
+1. Verify each suggested file:
+- Document relevance evidence
+- Identify actual relationships
+- Note any missing aspects
+
+2. Search for additional files:
+- Fill coverage gaps
+- Find related components
+- Locate test files
+
+3. Provide reasoning:
+- Explain why each file is included
+- Document verification process
+- Note any uncertainties
+"""
+    output = find_file_agent.run(prompt)
+
+    rsp_from_agent = re.findall(r'<FILE_PATH>(.*?)</FILE_PATH>', output, re.DOTALL)
+    files_from_agent = []
+    if rsp_from_agent:
+        try:
+            files_from_agent = yaml.safe_load(rsp_from_agent[0])
+        except Exception as e:
+            files_from_agent = []
+    else:
+        files_from_agent = []
+    return files_from_agent
+
+
 def find_relevant_files(user_input: str, root_dir: str) -> List[str]:
     try:
         files_from_codebase = []
@@ -23,42 +179,8 @@ def find_relevant_files(user_input: str, root_dir: str) -> List[str]:
         files_from_codebase = codebase.search_similar(user_input)

         PrettyOutput.print("Find files by agent...", OutputType.INFO)
-
-
-        find_file_agent = Agent(
-            system_prompt="""You are a file agent, you are responsible for finding files related to the user's requirement.
-You can use `read_code` tool to read the code and analyze the code, and `execute_shell` tool to execute shell command(such as `grep/find/ls/git/ctags`) to find files.
-
-IMPORTANT:
-- Only provide the file path, do not provide any other information.
-- If you can't find the file, please provide empty list.
-- Don't modify the code, just find related files.
-""",
-            name="FindFileAgent",
-            is_sub_agent=True,
-            tool_registry=find_file_tool_registry,
-            platform=PlatformRegistry().get_normal_platform(),
-            auto_complete=True,
-            summary_prompt="""Please provide the file path as this format(yaml list), if you can't find the file, please provide empty list:
-<FILE_PATH>
-- file_path1
-- file_path2
-</FILE_PATH>
-""")
-        prompt = f"Find files related about '{user_input}'\n"
-        if files_from_codebase:
-            prompt += f"\n\nFiles maybe related: {files_from_codebase}\n\n Please read above files first"
-        output = find_file_agent.run(prompt)
-
-        rsp_from_agent = re.findall(r'<FILE_PATH>(.*?)</FILE_PATH>', output, re.DOTALL)
-        files_from_agent = []
-        if rsp_from_agent:
-            try:
-                files_from_agent = yaml.safe_load(rsp_from_agent[0])
-            except Exception as e:
-                files_from_agent = []
-        else:
-            files_from_agent = []
+
+        files_from_agent = find_relevant_files_from_agent(user_input, files_from_codebase)

         selected_files = select_files(files_from_agent, os.getcwd())
         return selected_files
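The new find_relevant_files_from_agent relies on a plain-text protocol: the sub-agent returns a YAML list of paths wrapped in <FILE_PATH> tags, which is pulled out with a regex and parsed with yaml.safe_load (the trailing "# KEEP: ..." annotations are ignored as YAML comments). A minimal sketch of that round trip, using a made-up agent reply:

    import re
    import yaml

    # Hypothetical agent reply in the format requested by summary_prompt above
    output = (
        "<FILE_PATH>\n"
        "- jarvis/jarvis_lsp/base.py # KEEP: defines the BaseLSP interface\n"
        "- jarvis/jarvis_lsp/registry.py # KEEP: loads the language servers\n"
        "</FILE_PATH>"
    )

    blocks = re.findall(r'<FILE_PATH>(.*?)</FILE_PATH>', output, re.DOTALL)
    files = yaml.safe_load(blocks[0]) if blocks else []
    print(files)  # ['jarvis/jarvis_lsp/base.py', 'jarvis/jarvis_lsp/registry.py']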
jarvis/jarvis_codebase/main.py
CHANGED

@@ -4,12 +4,10 @@ import numpy as np
 import faiss
 from typing import List, Tuple, Optional, Dict

-import
-from jarvis.models.registry import PlatformRegistry
+from jarvis.jarvis_platform.registry import PlatformRegistry
 import concurrent.futures
-from threading import Lock
 from concurrent.futures import ThreadPoolExecutor
-from jarvis.utils import OutputType, PrettyOutput, find_git_root, get_file_md5, get_max_context_length,
+from jarvis.utils import OutputType, PrettyOutput, find_git_root, get_file_md5, get_max_context_length, get_thread_count, load_embedding_model, user_confirm
 from jarvis.utils import init_env
 import argparse
 import pickle
@@ -62,12 +60,11 @@ class CodeBase:
         return [f for f in files if not f.startswith(".jarvis")]

     def is_text_file(self, file_path: str):
-
-
-
-
-
-        return False
+        try:
+            open(file_path, "r", encoding="utf-8").read()
+            return True
+        except Exception:
+            return False

     def make_description(self, file_path: str, content: str) -> str:
         model = PlatformRegistry.get_global_platform_registry().get_cheap_platform()
@@ -386,6 +383,24 @@ Content: {content}
             force: Whether to force rebuild the index, without asking the user
         """
         try:
+            # Clean up cache for non-existent files
+            files_to_delete = []
+            for cached_file in list(self.vector_cache.keys()):
+                if not os.path.exists(cached_file) or not self.is_text_file(cached_file):
+                    files_to_delete.append(cached_file)
+                    cache_path = self._get_cache_path(cached_file)
+                    try:
+                        os.remove(cache_path)
+                    except Exception as e:
+                        PrettyOutput.print(f"Failed to delete cache file for {cached_file}: {str(e)}",
+                                           output_type=OutputType.WARNING)
+
+            if files_to_delete:
+                for file_path in files_to_delete:
+                    del self.vector_cache[file_path]
+                PrettyOutput.print(f"Cleaned cache for {len(files_to_delete)} non-existent files",
+                                   output_type=OutputType.INFO)
+
             # Update the git file list
             self.git_file_list = self.get_git_file_list()

@@ -443,16 +458,9 @@ Content: {content}

             # If force is True, continue directly
             if not force:
-
-
-
-                    if response in ['y', 'yes']:
-                        break
-                    elif response in ['', 'n', 'no']:
-                        PrettyOutput.print("Cancel rebuilding the index", output_type=OutputType.INFO)
-                        return
-                    else:
-                        PrettyOutput.print("Please input y or n", output_type=OutputType.WARNING)
+                if not user_confirm("Rebuild the index?", False):
+                    PrettyOutput.print("Cancel rebuilding the index", output_type=OutputType.INFO)
+                    return

             # Clean deleted files
             for file_path in files_to_delete:
@@ -649,7 +657,7 @@ Note: Only include files that have a strong connection to the query."""
             List[str]: The query variants list
         """
         model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
-        prompt = f"""Please generate 3 different expressions based on the following query, each expression should fully convey the meaning of the original query. These expressions will be used for code search, maintain professionalism and accuracy.
+        prompt = f"""Please generate 3 different expressions based on the following query in English, each expression should fully convey the meaning of the original query. These expressions will be used for code search, maintain professionalism and accuracy.
 Original query: {query}

 Please output 3 expressions directly, separated by two line breaks, without numbering or other markers.
@@ -718,7 +726,7 @@ Please output 3 expressions directly, separated by two line breaks, without numb
         message = "Found related files:\n"
         for path, score, _ in initial_results:
             message += f"File: {path} Similarity: {score:.3f}\n"
-        PrettyOutput.print(message.rstrip(), output_type=OutputType.INFO)
+        PrettyOutput.print(message.rstrip(), output_type=OutputType.INFO, lang="markdown")

         # Reorder the preliminary results
         return self.pick_results(query, [path for path, _, _ in initial_results])
@@ -729,19 +737,23 @@ Please output 3 expressions directly, separated by two line breaks, without numb

     def ask_codebase(self, query: str, top_k: int=20) -> str:
         """Query the codebase"""
-
-
+        reuslts_from_codebase = self.search_similar(query, top_k)
+
+        from jarvis.jarvis_code_agent.relevant_files import find_relevant_files_from_agent
+        results_from_agent = find_relevant_files_from_agent(query, reuslts_from_codebase)
+
+        if not results_from_agent:
             PrettyOutput.print("No related files found", output_type=OutputType.WARNING)
             return ""

         message = "Found related files:\n"
-        for path in
+        for path in results_from_agent:
             message += f"File: {path}\n"
-        PrettyOutput.print(message.rstrip(), output_type=OutputType.SUCCESS)
+        PrettyOutput.print(message.rstrip(), output_type=OutputType.SUCCESS, lang="markdown")

         prompt = f"""You are a code expert, please answer the user's question based on the following file information:
 """
-        for path in
+        for path in results_from_agent:
             try:
                 if len(prompt) > self.max_context_length:
                     PrettyOutput.print(f"Avoid context overflow, discard low-related file: {path}", OutputType.WARNING)
@@ -762,6 +774,8 @@ File content:
 User question: {query}

 Please answer the user's question in Chinese using professional language. If the provided file content is insufficient to answer the user's question, please inform the user. Never make up information.
+
+Add reference files and code snippets at the end of the answer.
 """
         model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
         response = model.chat_until_success(prompt)
@@ -862,8 +876,8 @@ def main():

         output = "Search Results:\n"
         for path in results:
-            output += f"""{path}\n"""
-        PrettyOutput.print(output, output_type=OutputType.INFO)
+            output += f"""- {path}\n"""
+        PrettyOutput.print(output, output_type=OutputType.INFO, lang="markdown")

     elif args.command == 'ask':
         response = codebase.ask_codebase(args.question, args.top_k)
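Two behavioral notes on the CodeBase changes above: is_text_file now defines "text file" as "decodes as UTF-8", and the old inline y/n rebuild prompt is replaced by a user_confirm helper imported from jarvis.utils. A standalone sketch of the same text-file heuristic (the context manager and narrowed exceptions here are cosmetic choices, not the package's exact code):

    def is_text_file(file_path: str) -> bool:
        # Treat "readable as UTF-8" as the definition of a text file,
        # mirroring the heuristic introduced in CodeBase.is_text_file.
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                f.read()
            return True
        except (OSError, UnicodeDecodeError):
            return False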
jarvis/jarvis_lsp/base.py
ADDED

@@ -0,0 +1,143 @@
+from abc import ABC, abstractmethod
+from typing import List, Dict, Optional, Tuple, Any
+
+class BaseLSP(ABC):
+    """Base class for Language Server Protocol integration.
+
+    Core LSP features needed for LLM-based code editing:
+    1. Code navigation and analysis
+    2. Code modification validation
+    3. Diagnostic information
+    4. Symbol analysis
+    """
+
+    language: str = ""  # Language identifier, should be overridden by subclasses
+
+    @abstractmethod
+    def initialize(self, workspace_path: str) -> bool:
+        """Initialize LSP server for the workspace.
+
+        Args:
+            workspace_path: Root path of the workspace
+
+        Returns:
+            bool: True if initialization successful
+        """
+        return False
+
+    @abstractmethod
+    def find_references(self, file_path: str, position: Tuple[int, int]) -> List[Dict[str, Any]]:
+        """Find all references of symbol at position.
+
+        Args:
+            file_path: Path to the file
+            position: (line, character) tuple
+
+        Returns:
+            List of references with location info:
+            [
+                {
+                    "uri": "file path",
+                    "range": {
+                        "start": {"line": int, "character": int},
+                        "end": {"line": int, "character": int}
+                    }
+                }
+            ]
+        """
+        return []
+
+    @abstractmethod
+    def find_definition(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        """Find definition of symbol at position.
+
+        Args:
+            file_path: Path to the file
+            position: (line, character) tuple
+
+        Returns:
+            Location of definition:
+            {
+                "uri": "file path",
+                "range": {
+                    "start": {"line": int, "character": int},
+                    "end": {"line": int, "character": int}
+                }
+            }
+        """
+        return None
+
+    @abstractmethod
+    def get_document_symbols(self, file_path: str) -> List[Dict[str, Any]]:
+        """Get all symbols in document.
+
+        Args:
+            file_path: Path to the file
+
+        Returns:
+            List of symbols with their locations and types
+        """
+        return []
+
+    @abstractmethod
+    def get_diagnostics(self, file_path: str) -> List[Dict[str, Any]]:
+        """Get diagnostics (errors, warnings) for file.
+
+        Args:
+            file_path: Path to the file
+
+        Returns:
+            List of diagnostic items:
+            [
+                {
+                    "range": {
+                        "start": {"line": int, "character": int},
+                        "end": {"line": int, "character": int}
+                    },
+                    "severity": 1 | 2 | 3 | 4,  # Error=1, Warning=2, Info=3, Hint=4
+                    "code": str,  # Error code if any
+                    "source": str,  # Source of diagnostic (e.g. "pylint")
+                    "message": str,  # Diagnostic message
+                    "relatedInformation": [  # Optional related info
+                        {
+                            "location": {
+                                "uri": str,
+                                "range": {...}
+                            },
+                            "message": str
+                        }
+                    ]
+                }
+            ]
+        """
+        return []
+
+    @abstractmethod
+    def prepare_rename(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        """Check if symbol at position can be renamed.
+
+        Args:
+            file_path: Path to the file
+            position: Symbol position
+
+        Returns:
+            Range that would be renamed or None if rename not allowed
+        """
+        return None
+
+    @abstractmethod
+    def validate_edit(self, file_path: str, edit: Dict[str, Any]) -> bool:
+        """Validate if proposed edit is syntactically correct.
+
+        Args:
+            file_path: Path to the file
+            edit: Edit operation in LSP format
+
+        Returns:
+            bool: True if edit is valid
+        """
+        return False
+
+    def shutdown(self):
+        """Shutdown LSP server cleanly."""
+        pass
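BaseLSP is only an interface: the language-specific modules added in this release (cpp.py, go.py, python.py, rust.py under jarvis/jarvis_lsp/) subclass it and fill in each hook, and jarvis_lsp/registry.py presumably picks the right one per language. A minimal illustrative subclass with deliberately do-nothing behavior, just to show the shape the interface expects (NullLSP is hypothetical, not part of the package):

    from typing import Any, Dict, List, Optional, Tuple

    from jarvis.jarvis_lsp.base import BaseLSP

    class NullLSP(BaseLSP):
        """Hypothetical no-op implementation, shown only to illustrate the interface."""
        language = "text"

        def initialize(self, workspace_path: str) -> bool:
            return True  # pretend the server started

        def find_references(self, file_path: str, position: Tuple[int, int]) -> List[Dict[str, Any]]:
            return []

        def find_definition(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
            return None

        def get_document_symbols(self, file_path: str) -> List[Dict[str, Any]]:
            return []

        def get_diagnostics(self, file_path: str) -> List[Dict[str, Any]]:
            return []

        def prepare_rename(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
            return None

        def validate_edit(self, file_path: str, edit: Dict[str, Any]) -> bool:
            return True  # accept every edit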
jarvis/jarvis_lsp/cpp.py
ADDED

@@ -0,0 +1,134 @@
+import os
+import shutil
+import subprocess
+from typing import List, Dict, Optional, Tuple, Any
+import json
+from jarvis.jarvis_lsp.base import BaseLSP
+from jarvis.utils import PrettyOutput, OutputType
+
+class CPPLSP(BaseLSP):
+    """C++ LSP implementation using clangd."""
+
+    language = "cpp"
+
+    @staticmethod
+    def check() -> bool:
+        """Check if clangd is installed."""
+        return shutil.which("clangd") is not None
+
+    def __init__(self):
+        self.workspace_path = ""
+        self.clangd_process = None
+        self.request_id = 0
+
+    def initialize(self, workspace_path: str) -> bool:
+        try:
+            self.workspace_path = workspace_path
+            # Start clangd process
+            self.clangd_process = subprocess.Popen(
+                ["clangd", "--background-index"],
+                stdin=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+
+            # Send initialize request
+            self._send_request("initialize", {
+                "processId": os.getpid(),
+                "rootUri": f"file://{workspace_path}",
+                "capabilities": {}
+            })
+
+            return True
+        except Exception as e:
+            PrettyOutput.print(f"C++ LSP initialization failed: {str(e)}", OutputType.ERROR)
+            return False
+
+    def _send_request(self, method: str, params: Dict) -> Optional[Dict]:
+        """Send JSON-RPC request to clangd."""
+        if not self.clangd_process:
+            return None
+
+        try:
+            self.request_id += 1
+            request = {
+                "jsonrpc": "2.0",
+                "id": self.request_id,
+                "method": method,
+                "params": params
+            }
+
+            self.clangd_process.stdin.write(json.dumps(request).encode() + b"\n")  # type: ignore
+            self.clangd_process.stdin.flush()  # type: ignore
+
+            response = json.loads(self.clangd_process.stdout.readline().decode())  # type: ignore
+            return response.get("result")
+        except Exception:
+            return None
+
+    def find_references(self, file_path: str, position: Tuple[int, int]) -> List[Dict[str, Any]]:
+        result = self._send_request("textDocument/references", {
+            "textDocument": {"uri": f"file://{file_path}"},
+            "position": {"line": position[0], "character": position[1]},
+            "context": {"includeDeclaration": True}
+        })
+        return result or []  # type: ignore
+
+    def find_definition(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        result = self._send_request("textDocument/definition", {
+            "textDocument": {"uri": f"file://{file_path}"},
+            "position": {"line": position[0], "character": position[1]}
+        })
+        return result[0] if result else None
+
+    def get_document_symbols(self, file_path: str) -> List[Dict[str, Any]]:
+        result = self._send_request("textDocument/documentSymbol", {
+            "textDocument": {"uri": f"file://{file_path}"}
+        })
+        return result or []  # type: ignore
+
+    def get_diagnostics(self, file_path: str) -> List[Dict[str, Any]]:
+        # Send didOpen notification to trigger diagnostics
+        self._send_request("textDocument/didOpen", {
+            "textDocument": {
+                "uri": f"file://{file_path}",
+                "languageId": "cpp",
+                "version": 1,
+                "text": open(file_path).read()
+            }
+        })
+
+        # Wait for diagnostic notification
+        try:
+            response = json.loads(self.clangd_process.stdout.readline().decode())  # type: ignore
+            if response.get("method") == "textDocument/publishDiagnostics":
+                return response.get("params", {}).get("diagnostics", [])
+        except Exception:
+            pass
+        return []
+
+    def prepare_rename(self, file_path: str, position: Tuple[int, int]) -> Optional[Dict[str, Any]]:
+        result = self._send_request("textDocument/prepareRename", {
+            "textDocument": {"uri": f"file://{file_path}"},
+            "position": {"line": position[0], "character": position[1]}
+        })
+        return result
+
+    def validate_edit(self, file_path: str, edit: Dict[str, Any]) -> bool:
+        # Send workspace/willRenameFiles request to check validity
+        result = self._send_request("workspace/willRenameFiles", {
+            "files": [{
+                "oldUri": f"file://{file_path}",
+                "newUri": f"file://{file_path}.tmp"
+            }]
+        })
+        return bool(result)
+
+    def shutdown(self):
+        if self.clangd_process:
+            try:
+                self._send_request("shutdown", {})
+                self.clangd_process.terminate()
+                self.clangd_process = None
+            except Exception:
+                pass
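Putting the pieces together, a caller would be expected to drive CPPLSP roughly as sketched below. The lsp_* tools and jarvis/jarvis_lsp/registry.py added in this release are presumably the intended entry points; the workspace and file paths here are made up, and positions are zero-based (line, character) tuples as in LSP:

    from jarvis.jarvis_lsp.cpp import CPPLSP

    if CPPLSP.check():  # requires clangd on PATH
        lsp = CPPLSP()
        if lsp.initialize("/path/to/workspace"):
            target = "/path/to/workspace/src/main.cpp"
            definition = lsp.find_definition(target, (41, 10))
            diagnostics = lsp.get_diagnostics(target)
            print(definition, len(diagnostics))
        lsp.shutdown()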