jarvis-ai-assistant 0.1.110__py3-none-any.whl → 0.1.112__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jarvis-ai-assistant might be problematic.

Files changed (47)
  1. jarvis/__init__.py +1 -1
  2. jarvis/agent.py +51 -39
  3. jarvis/jarvis_code_agent/code_agent.py +89 -53
  4. jarvis/jarvis_code_agent/file_select.py +20 -20
  5. jarvis/jarvis_code_agent/patch.py +20 -11
  6. jarvis/jarvis_code_agent/relevant_files.py +68 -16
  7. jarvis/jarvis_codebase/main.py +82 -88
  8. jarvis/jarvis_lsp/cpp.py +1 -1
  9. jarvis/jarvis_lsp/go.py +1 -1
  10. jarvis/jarvis_lsp/python.py +0 -2
  11. jarvis/jarvis_lsp/registry.py +13 -13
  12. jarvis/jarvis_lsp/rust.py +1 -1
  13. jarvis/jarvis_platform/ai8.py +14 -14
  14. jarvis/jarvis_platform/base.py +1 -1
  15. jarvis/jarvis_platform/kimi.py +17 -17
  16. jarvis/jarvis_platform/ollama.py +14 -14
  17. jarvis/jarvis_platform/openai.py +8 -8
  18. jarvis/jarvis_platform/oyi.py +19 -19
  19. jarvis/jarvis_platform/registry.py +6 -6
  20. jarvis/jarvis_platform_manager/main.py +17 -17
  21. jarvis/jarvis_rag/main.py +25 -25
  22. jarvis/jarvis_smart_shell/main.py +6 -6
  23. jarvis/jarvis_tools/ask_codebase.py +4 -4
  24. jarvis/jarvis_tools/ask_user.py +2 -2
  25. jarvis/jarvis_tools/create_code_agent.py +8 -8
  26. jarvis/jarvis_tools/create_sub_agent.py +2 -2
  27. jarvis/jarvis_tools/execute_shell.py +2 -2
  28. jarvis/jarvis_tools/file_operation.py +1 -1
  29. jarvis/jarvis_tools/git_commiter.py +4 -6
  30. jarvis/jarvis_tools/methodology.py +3 -3
  31. jarvis/jarvis_tools/rag.py +3 -3
  32. jarvis/jarvis_tools/read_code.py +4 -3
  33. jarvis/jarvis_tools/read_webpage.py +19 -6
  34. jarvis/jarvis_tools/registry.py +11 -11
  35. jarvis/jarvis_tools/search.py +88 -27
  36. jarvis/jarvis_tools/select_code_files.py +1 -1
  37. jarvis/jarvis_tools/tool_generator.py +182 -0
  38. jarvis/utils/date_utils.py +19 -0
  39. jarvis/utils.py +31 -25
  40. jarvis_ai_assistant-0.1.112.dist-info/METADATA +460 -0
  41. jarvis_ai_assistant-0.1.112.dist-info/RECORD +64 -0
  42. jarvis_ai_assistant-0.1.110.dist-info/METADATA +0 -462
  43. jarvis_ai_assistant-0.1.110.dist-info/RECORD +0 -62
  44. {jarvis_ai_assistant-0.1.110.dist-info → jarvis_ai_assistant-0.1.112.dist-info}/LICENSE +0 -0
  45. {jarvis_ai_assistant-0.1.110.dist-info → jarvis_ai_assistant-0.1.112.dist-info}/WHEEL +0 -0
  46. {jarvis_ai_assistant-0.1.110.dist-info → jarvis_ai_assistant-0.1.112.dist-info}/entry_points.txt +0 -0
  47. {jarvis_ai_assistant-0.1.110.dist-info → jarvis_ai_assistant-0.1.112.dist-info}/top_level.txt +0 -0
jarvis/jarvis_tools/search.py CHANGED
@@ -1,6 +1,6 @@
 from typing import Dict, Any, List
 from jarvis.jarvis_platform.registry import PlatformRegistry
-from jarvis.utils import PrettyOutput, OutputType
+from jarvis.utils import PrettyOutput, OutputType, get_context_token_count, get_max_token_count
 from jarvis.jarvis_tools.read_webpage import WebpageTool
 from playwright.sync_api import sync_playwright
 from urllib.parse import quote
@@ -58,7 +58,7 @@ def bing_search(query):
         return summaries

     except Exception as error:
-        PrettyOutput.print(f"Search error: {str(error)}", OutputType.ERROR)
+        PrettyOutput.print(f"搜索错误:{str(error)}", OutputType.ERROR)
         return None

 class SearchTool:
@@ -106,28 +106,89 @@ class SearchTool:
                 })
             return formatted_results
         except Exception as e:
-            PrettyOutput.print(f"Search request failed: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"搜索请求失败:{str(e)}", OutputType.ERROR)
             return []

     def _extract_info(self, contents: List[str], question: str) -> str:
         """Use language model to extract key information from web content"""
-        prompt = f"""Please answer the question based on the following search results: {question}
+        try:
+            # Reserve tokens for prompt and response
+            max_tokens = get_max_token_count()
+            reserved_tokens = 2000  # Reserve tokens for prompt template and response
+            available_tokens = max_tokens - reserved_tokens
+
+            # Split contents into batches
+            batches = []
+            current_batch = []
+            current_tokens = 0
+
+            for content in contents:
+                content_tokens = get_context_token_count(content)
+
+                # If adding this content would exceed limit, start new batch
+                if current_tokens + content_tokens > available_tokens:
+                    if current_batch:
+                        batches.append(current_batch)
+                    current_batch = [content]
+                    current_tokens = content_tokens
+                else:
+                    current_batch.append(content)
+                    current_tokens += content_tokens
+
+            # Add final batch
+            if current_batch:
+                batches.append(current_batch)

-Search results content:
+            # Process each batch
+            batch_results = []
+            for i, batch in enumerate(batches, 1):
+                PrettyOutput.print(f"正在处理批次 {i}/{len(batches)}...", OutputType.PROGRESS)
+
+                prompt = f"""Please analyze these search results to answer the question: {question}
+
+Search results content (Batch {i}/{len(batches)}):
 {'-' * 40}
-{''.join(contents)}
+{''.join(batch)}
 {'-' * 40}

-Please provide a concise and accurate answer, focusing on information directly related to the question. If there is no relevant information in the search results, please clearly state that.
-When answering, pay attention to:
-1. Maintain objectivity, providing information based solely on search results
-2. If there are conflicts between different sources, point out the differences
-3. Appropriately cite information sources
-4. If the information is incomplete or uncertain, please explain"""
+Please extract key information related to the question. Focus on:
+1. Relevant facts and details
+2. Maintaining objectivity
+3. Citing sources when appropriate
+4. Noting any uncertainties
+
+Format your response as a clear summary of findings from this batch."""
+
+                response = self.model.chat_until_success(prompt)
+                batch_results.append(response)
+
+            # If only one batch, return its result directly
+            if len(batch_results) == 1:
+                return batch_results[0]
+
+            # Synthesize results from all batches
+            batch_findings = '\n\n'.join(f'Batch {i+1}:\n{result}' for i, result in enumerate(batch_results))
+            separator = '-' * 40
+
+            synthesis_prompt = f"""Please provide a comprehensive answer to the original question by synthesizing the findings from multiple batches of search results.
+
+Original Question: {question}
+
+Findings from each batch:
+{separator}
+{batch_findings}
+{separator}
+
+Please synthesize a final answer that:
+1. Combines key insights from all batches
+2. Resolves any contradictions between sources
+3. Maintains clear source attribution
+4. Acknowledges any remaining uncertainties
+5. Provides a coherent and complete response to the original question"""
+
+            final_response = self.model.chat_until_success(synthesis_prompt)
+            return final_response

-        try:
-            response = self.model.chat_until_success(prompt)
-            return response
         except Exception as e:
             return f"Information extraction failed: {str(e)}"

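The rewritten _extract_info caps each LLM call at the model's context window: it reserves 2000 tokens for the prompt template and response, greedily packs page contents into batches under the remaining budget, summarizes each batch, and, when there is more than one batch, synthesizes the per-batch summaries into a final answer. Below is a minimal standalone sketch of the greedy packing step; the character-based token estimate is a stand-in for get_context_token_count, which the real code imports from jarvis.utils.

    from typing import List

    def pack_batches(contents: List[str], max_tokens: int,
                     reserved_tokens: int = 2000) -> List[List[str]]:
        available = max_tokens - reserved_tokens
        batches, current, used = [], [], 0
        for content in contents:
            tokens = len(content) // 4  # stand-in estimate, ~4 chars per token
            if used + tokens > available and current:
                batches.append(current)  # flush the full batch
                current, used = [], 0
            current.append(content)      # oversized items still get their own batch
            used += tokens
        if current:
            batches.append(current)
        return batches

    # pack_batches(["page one ...", "page two ..."], max_tokens=8192)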
@@ -139,8 +200,8 @@ When answering, pay attention to:
         max_results = args.get("max_results", 3)

         # Print search information
-        PrettyOutput.print(f"Search query: {query}", OutputType.INFO)
-        PrettyOutput.print(f"Related question: {question}", OutputType.INFO)
+        PrettyOutput.print(f"搜索关键词: {query}", OutputType.INFO)
+        PrettyOutput.print(f"相关问题: {question}", OutputType.INFO)

         # Get search results
         results = self._search(query, max_results)
@@ -155,13 +216,13 @@ When answering, pay attention to:
         contents = []
         for i, result in enumerate(results, 1):
             try:
-                PrettyOutput.print(f"Reading result {i}/{len(results)}... {result['title']} - {result['href']}", OutputType.PROGRESS)
+                PrettyOutput.print(f"正在读取结果 {i}/{len(results)}... {result['title']} - {result['href']}", OutputType.PROGRESS)
                 webpage_result = self.webpage_tool.execute({"url": result["href"]})
                 if webpage_result["success"]:
                     contents.append(f"\nSource {i}: {result['href']}\n")
                     contents.append(webpage_result["stdout"])
             except Exception as e:
-                PrettyOutput.print(f"Failed to read result {i}: {str(e)}", OutputType.WARNING)
+                PrettyOutput.print(f"读取结果失败 {i}: {str(e)}", OutputType.WARNING)
                 continue

         if not contents:
@@ -172,7 +233,7 @@ When answering, pay attention to:
             }

         # Extract information
-        PrettyOutput.print("Analyzing search results...", OutputType.PROGRESS)
+        PrettyOutput.print("正在分析搜索结果...", OutputType.PROGRESS)
         analysis = self._extract_info(contents, question)

         return {
@@ -200,15 +261,15 @@ def main():
     args = parser.parse_args()

     try:
-        PrettyOutput.print(f"Searching: {args.query}", OutputType.INFO)
+        PrettyOutput.print(f"搜索: {args.query}", OutputType.INFO)

         results = bing_search(args.query)

         if not results:
-            PrettyOutput.print("No search results found", OutputType.WARNING)
+            PrettyOutput.print("未找到搜索结果", OutputType.WARNING)
             sys.exit(1)

-        PrettyOutput.print(f"\nFound {len(results)} results:", OutputType.INFO)
+        PrettyOutput.print(f"\n找到 {len(results)} 个结果:", OutputType.INFO)

         for i, result in enumerate(results[:args.max], 1):
             output = []
@@ -217,16 +278,16 @@ def main():
                 output.append(f"{i}. {result['href']}")
             else:
                 output.append(f"{i}. {result['title']}")
-                output.append(f"Link: {result['href']}")
+                output.append(f"链接: {result['href']}")
                 if result['abstract']:
-                    output.append(f"Abstract: {result['abstract']}")
+                    output.append(f"摘要: {result['abstract']}")
             PrettyOutput.print("\n".join(output), OutputType.INFO)

     except KeyboardInterrupt:
-        PrettyOutput.print("Search cancelled", OutputType.WARNING)
+        PrettyOutput.print("搜索已取消", OutputType.WARNING)
         sys.exit(1)
     except Exception as e:
-        PrettyOutput.print(f"Execution error: {str(e)}", OutputType.ERROR)
+        PrettyOutput.print(f"执行错误: {str(e)}", OutputType.ERROR)
         sys.exit(1)

 if __name__ == "__main__":
jarvis/jarvis_tools/select_code_files.py CHANGED
@@ -33,7 +33,7 @@ class CodeFileSelecterTool:
         related_files = args.get("related_files", [])
         root_dir = args.get("root_dir", ".").strip()

-        PrettyOutput.print("Starting interactive file selection...", OutputType.INFO)
+        PrettyOutput.print("开始交互式文件选择...", OutputType.INFO)

         # Use file_select module to handle file selection
         selected_files = select_files(
jarvis/jarvis_tools/tool_generator.py ADDED
@@ -0,0 +1,182 @@
+"""
+Tool Generator Tool - Automatically creates new tools using LLM
+"""
+from pathlib import Path
+import re
+from typing import Dict, Any
+from jarvis.jarvis_platform.registry import PlatformRegistry
+
+class ToolGenerator:
+    name = "tool_generator"
+    description = "Generates new tools using LLM that integrate with the system"
+    parameters = {
+        "type": "object",
+        "properties": {
+            "tool_name": {
+                "type": "string",
+                "description": "Name of the new tool"
+            },
+            "description": {
+                "type": "string",
+                "description": "Description of the tool's purpose"
+            },
+            "input_spec": {
+                "type": "string",
+                "description": "Specification of required inputs and functionality"
+            }
+        },
+        "required": ["tool_name", "description", "input_spec"]
+    }
+
+    def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
+        """Generate and save a new tool using LLM"""
+        # Get fresh model instance for each execution
+        model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
+
+        try:
+            tool_name = arguments["tool_name"]
+            description = arguments["description"]
+            input_spec = arguments["input_spec"]
+
+            # Generate tool implementation using LLM
+            prompt = self._create_prompt(tool_name, description, input_spec)
+            llm_response = model.chat_until_success(prompt)
+
+            # Extract implementation with more flexible parsing
+            implementation = self._extract_code(llm_response)
+            if not implementation:
+                return {
+                    "success": False,
+                    "stdout": "",
+                    "stderr": "Could not extract valid Python code from LLM response"
+                }
+
+            # Validate return value format
+            if not self._validate_return_value_format(implementation):
+                return {
+                    "success": False,
+                    "stdout": "",
+                    "stderr": "Generated tool does not follow required return value format"
+                }
+
+            # Save the new tool
+            tools_dir = Path.home() / ".jarvis" / "tools"
+            tools_dir.mkdir(parents=True, exist_ok=True)
+            tool_file = tools_dir / f"{tool_name}.py"
+
+            with open(tool_file, "w") as f:
+                f.write(implementation)
+
+            return {
+                "success": True,
+                "stdout": f"Tool successfully generated at: {tool_file}",
+                "stderr": ""
+            }
+
+        except Exception as e:
+            return {
+                "success": False,
+                "stdout": "",
+                "stderr": f"Tool generation failed: {str(e)}"
+            }
+
+    def _create_prompt(self, tool_name: str, description: str, input_spec: str) -> str:
+        """Create the LLM prompt for tool generation"""
+        example_code = '''
+<TOOL>
+from typing import Dict, Any
+from jarvis.utils import OutputType, PrettyOutput
+from jarvis.jarvis_platform.registry import PlatformRegistry
+
+class CustomTool:
+    name = "Tool name"  # Tool name used when calling
+    description = "Tool description"  # Tool purpose
+    parameters = {  # Parameters JSON Schema
+        "type": "object",
+        "properties": {
+            "param1": {
+                "type": "string",
+                "description": "Parameter description"
+            }
+        },
+        "required": ["param1"]
+    }
+
+    def execute(self, args: Dict[str, Any]) -> Dict[str, Any]:
+        """Execute the tool functionality
+
+        Args:
+            args: Parameters passed to the tool
+
+        Returns:
+            {
+                "success": bool,
+                "stdout": str,
+                "stderr": str,
+            }
+        """
+        try:
+            # Implement the tool logic here
+            # Use LLM
+            # model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
+            # result = model.chat_until_success(prompt)
+
+            result = "Tool result"
+            return {
+                "success": True,
+                "stdout": result,
+                "stderr": ""
+            }
+        except Exception as e:
+            return {
+                "success": False,
+                "stdout": "",
+                "stderr": str(e)
+            }
+</TOOL>
+'''
+
+        return f'''Create a Python tool class that integrates with the Jarvis system. Follow these requirements:
+1. Class name: {tool_name.capitalize()}Tool
+2. Description: {description}
+3. Input specification: {input_spec}
+4. Must include these class attributes:
+   - name: str (tool identifier)
+   - description: str (tool purpose)
+   - parameters: dict (JSON schema for inputs)
+5. Must implement execute(self, args: Dict) -> Dict method
+6. The execute method MUST return a dictionary with these exact fields:
+   - success: bool (indicating operation success)
+   - stdout: str (primary output/result)
+   - stderr: str (error message if any)
+7. Must handle errors gracefully
+8. Return ONLY the Python implementation code
+9. The code should be complete and ready to use.
+10. Output the code in the following format:
+<TOOL>
+{example_code}
+</TOOL>

+Example:
+{example_code}
+'''
+
+    def _extract_code(self, response: str) -> str:
+        """Flexibly extract Python code from LLM response"""
+        # Find the first occurrence of <TOOL> and </TOOL>
+        sm = re.search(r'<TOOL>(.*?)</TOOL>', response, re.DOTALL)
+        if sm:
+            return sm.group(1)
+        return ""
+
+    def _validate_return_value_format(self, code: str) -> bool:
+        """Validate that execute method returns correct format"""
+        required_fields = ["success", "stdout", "stderr"]
+        # Look for execute method
+        if "def execute(self, args: Dict) -> Dict:" not in code and \
+           "def execute(self, args: Dict) -> Dict[str, Any]:" not in code:
+            return False
+
+        # Check for required fields in return statement
+        return all(field in code for field in required_fields)
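Generated tools are written to ~/.jarvis/tools/<tool_name>.py; the registry that later discovers them is not part of this diff. The loader below is only an illustrative sketch of how such a file could be imported and invoked, and its class-discovery logic is an assumption, not the package's code.

    import importlib.util
    from pathlib import Path

    def load_generated_tool(tool_name: str):
        # Path convention matches ToolGenerator.execute above
        tool_file = Path.home() / ".jarvis" / "tools" / f"{tool_name}.py"
        spec = importlib.util.spec_from_file_location(tool_name, tool_file)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # run the generated file
        # Hypothetical discovery: first class exposing the tool interface
        for obj in vars(module).values():
            if isinstance(obj, type) and hasattr(obj, "execute") and hasattr(obj, "name"):
                return obj()
        raise ValueError(f"No tool class found in {tool_file}")

    # tool = load_generated_tool("my_tool")
    # result = tool.execute({"param1": "value"})
    # result -> {"success": ..., "stdout": ..., "stderr": ...}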
jarvis/utils/date_utils.py ADDED
@@ -0,0 +1,19 @@
+from datetime import datetime
+
+class DateValidator:
+    @staticmethod
+    def validate_iso_date(date_str: str) -> bool:
+        try:
+            datetime.fromisoformat(date_str)
+            return True
+        except ValueError:
+            return False
+
+    @staticmethod
+    def validate_date_range(start: str, end: str) -> bool:
+        try:
+            start_dt = datetime.fromisoformat(start)
+            end_dt = datetime.fromisoformat(end)
+            return start_dt <= end_dt
+        except ValueError:
+            return False
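A short usage sketch for the new DateValidator helpers. The inputs are illustrative, and the import path assumes the module resolves as jarvis.utils.date_utils (the wheel also ships jarvis/utils.py, so the effective path may differ).

    # Illustrative inputs, not values used by the package.
    from jarvis.utils.date_utils import DateValidator

    DateValidator.validate_iso_date("2024-01-15")                  # True
    DateValidator.validate_iso_date("15/01/2024")                  # False: not ISO 8601
    DateValidator.validate_date_range("2024-01-01", "2024-12-31")  # True
    DateValidator.validate_date_range("2024-12-31", "2024-01-01")  # False: end before start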
jarvis/utils.py CHANGED
@@ -213,6 +213,8 @@ class FileCompleter(Completer):
     """Custom completer for file paths with fuzzy matching."""
     def __init__(self):
         self.path_completer = PathCompleter()
+        self.max_suggestions = 10  # 增加显示数量
+        self.min_score = 10  # 降低相似度阈值

     def get_completions(self, document: Document, complete_event):
         text = document.text_before_cursor
@@ -251,21 +253,26 @@ class FileCompleter(Completer):
         # If no input after @, show all files
         # Otherwise use fuzzy matching
         if not file_path:
-            scored_files = [(path, 100) for path in all_files]
+            scored_files = [(path, 100) for path in all_files[:self.max_suggestions]]
         else:
             scored_files = [
                 (path, fuzz.ratio(file_path.lower(), path.lower()))
                 for path in all_files
             ]
+            # Sort by score and take top results
             scored_files.sort(key=lambda x: x[1], reverse=True)
+            scored_files = scored_files[:self.max_suggestions]

         # Return completions for files
         for path, score in scored_files:
-            if not file_path or score > 30:  # Show all if no input, otherwise filter by score
+            if not file_path or score > self.min_score:
+                display_text = path
+                if file_path and score < 100:
+                    display_text = f"{path} ({score}%)"
                 completion = Completion(
                     text=path,
                     start_position=-len(file_path),
-                    display=f"{path}" if not file_path else f"{path} ({score}%)",
+                    display=display_text,
                     display_meta="File"
                 )
                 yield completion
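For reference, fuzz.ratio is the usual Levenshtein-based similarity score in the 0-100 range. A standalone sketch of the new scoring pipeline, assuming rapidfuzz (the package may use a compatible library such as fuzzywuzzy, which shares this API):

    from rapidfuzz import fuzz

    candidates = ["jarvis/utils.py", "jarvis/agent.py", "README.md"]
    query = "utils"
    scored = sorted(
        ((path, fuzz.ratio(query.lower(), path.lower())) for path in candidates),
        key=lambda x: x[1],
        reverse=True,
    )
    # max_suggestions=10 caps the list; min_score=10 drops weak matches
    top = [(p, s) for p, s in scored[:10] if s > 10]
    print(top)  # best matches first, e.g. ("jarvis/utils.py", ...)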
@@ -315,7 +322,7 @@ def get_multiline_input(tip: str) -> str:
             lines.append(line)

         except KeyboardInterrupt:
-            PrettyOutput.print("Input cancelled", OutputType.INFO)
+            PrettyOutput.print("输入已取消", OutputType.INFO)
             return ""

     return "\n".join(lines)
@@ -341,7 +348,7 @@ def init_env():
                 except ValueError:
                     continue
     except Exception as e:
-        PrettyOutput.print(f"Warning: Failed to read {env_file}: {e}", OutputType.WARNING)
+        PrettyOutput.print(f"警告: 读取 {env_file} 失败: {e}", OutputType.WARNING)


 def while_success(func, sleep_time: float = 0.1):
@@ -349,7 +356,7 @@ def while_success(func, sleep_time: float = 0.1):
         try:
             return func()
         except Exception as e:
-            PrettyOutput.print(f"Execution failed: {str(e)}, retry in {sleep_time}s...", OutputType.ERROR)
+            PrettyOutput.print(f"执行失败: {str(e)}, 等待 {sleep_time}s...", OutputType.ERROR)
             time.sleep(sleep_time)
             continue

@@ -359,7 +366,7 @@ def while_true(func, sleep_time: float = 0.1):
         ret = func()
         if ret:
             break
-        PrettyOutput.print(f"Execution failed, retry in {sleep_time}s...", OutputType.WARNING)
+        PrettyOutput.print(f"执行失败, 等待 {sleep_time}s...", OutputType.WARNING)
         time.sleep(sleep_time)
     return ret

@@ -376,6 +383,7 @@ def has_uncommitted_changes():
     working_changes = os.popen("git diff --exit-code").read().strip() != ""
     # Check staged changes
     staged_changes = os.popen("git diff --cached --exit-code").read().strip() != ""
+    os.system("git reset HEAD")
     return working_changes or staged_changes

 def load_embedding_model():
381
389
  def load_embedding_model():
@@ -425,7 +433,7 @@ def load_rerank_model():
425
433
  model_name = "BAAI/bge-reranker-v2-m3"
426
434
  cache_dir = os.path.expanduser("~/.cache/huggingface/hub")
427
435
 
428
- PrettyOutput.print(f"Loading reranking model: {model_name}...", OutputType.INFO)
436
+ PrettyOutput.print(f"加载重排序模型: {model_name}...", OutputType.INFO)
429
437
 
430
438
  try:
431
439
  # Load model and tokenizer
@@ -476,7 +484,7 @@ def is_long_context(files: list) -> bool:
             if total_tokens > threshold:
                 return True
         except Exception as e:
-            PrettyOutput.print(f"Failed to read file {file_path}: {e}", OutputType.WARNING)
+            PrettyOutput.print(f"读取文件 {file_path} 失败: {e}", OutputType.WARNING)
             continue

     return total_tokens > threshold
@@ -503,13 +511,13 @@ def _create_methodology_embedding(embedding_model: Any, methodology_text: str) -
         vector = np.array(embedding.cpu().numpy(), dtype=np.float32)
         return vector[0]  # Return first vector, because we only encoded one text
     except Exception as e:
-        PrettyOutput.print(f"Failed to create methodology embedding vector: {str(e)}", OutputType.ERROR)
+        PrettyOutput.print(f"创建方法论嵌入向量失败: {str(e)}", OutputType.ERROR)
         return np.zeros(1536, dtype=np.float32)


 def load_methodology(user_input: str) -> str:
     """Load methodology and build vector index"""
-    PrettyOutput.print("Loading methodology...", OutputType.PROGRESS)
+    PrettyOutput.print("加载方法论...", OutputType.PROGRESS)
     user_jarvis_methodology = os.path.expanduser("~/.jarvis/methodology")
     if not os.path.exists(user_jarvis_methodology):
         return ""
@@ -557,7 +565,7 @@ def load_methodology(user_input: str) -> str:
         methodology_index.add_with_ids(vectors_array, np.array(ids))  # type: ignore
         query_embedding = _create_methodology_embedding(embedding_model, user_input)
         k = min(3, len(methodology_data))
-        PrettyOutput.print(f"Retrieving methodology...", OutputType.INFO)
+        PrettyOutput.print(f"检索方法论...", OutputType.INFO)
         distances, indices = methodology_index.search(
             query_embedding.reshape(1, -1), k
         )  # type: ignore
@@ -582,9 +590,7 @@
             return make_methodology_prompt(data)

     except Exception as e:
-        PrettyOutput.print(f"Error loading methodology: {str(e)}", OutputType.ERROR)
-        import traceback
-        PrettyOutput.print(f"Error trace: {traceback.format_exc()}", OutputType.INFO)
+        PrettyOutput.print(f"加载方法论失败: {str(e)}", OutputType.ERROR)
         return ""

@@ -640,15 +646,15 @@ def init_gpu_config() -> Dict:
             torch.cuda.empty_cache()

             PrettyOutput.print(
-                f"GPU initialized: {torch.cuda.get_device_name(0)}\n"
-                f"Device Memory: {gpu_mem / 1024**3:.1f}GB\n"
-                f"Shared Memory: {config['shared_memory'] / 1024**3:.1f}GB",
+                f"GPU已初始化: {torch.cuda.get_device_name(0)}\n"
+                f"设备内存: {gpu_mem / 1024**3:.1f}GB\n"
+                f"共享内存: {config['shared_memory'] / 1024**3:.1f}GB",
                 output_type=OutputType.SUCCESS
             )
         else:
-            PrettyOutput.print("No GPU available, using CPU mode", output_type=OutputType.WARNING)
+            PrettyOutput.print("没有GPU可用, 使用CPU模式", output_type=OutputType.WARNING)
     except Exception as e:
-        PrettyOutput.print(f"GPU initialization failed: {str(e)}", output_type=OutputType.WARNING)
+        PrettyOutput.print(f"GPU初始化失败: {str(e)}", output_type=OutputType.WARNING)

     return config

@@ -669,7 +675,7 @@ def get_embedding_batch(embedding_model: Any, texts: List[str]) -> np.ndarray:
             all_vectors.extend(vectors)
         return np.vstack(all_vectors)
     except Exception as e:
-        PrettyOutput.print(f"Batch embedding failed: {str(e)}", OutputType.ERROR)
+        PrettyOutput.print(f"批量嵌入失败: {str(e)}", OutputType.ERROR)
         return np.zeros((0, embedding_model.get_sentence_embedding_dimension()), dtype=np.float32)

@@ -685,9 +691,6 @@ def dont_use_local_model():

 def is_auto_complete() -> bool:
     return os.getenv('JARVIS_AUTO_COMPLETE', 'false') == 'true'
-
-def is_disable_codebase() -> bool:
-    return os.getenv('JARVIS_DISABLE_CODEBASE', 'false') == 'true'

 def is_use_methodology() -> bool:
     return os.getenv('JARVIS_USE_METHODOLOGY', 'true') == 'true'
@@ -731,6 +734,9 @@ def get_cheap_platform_name() -> str:
 def get_cheap_model_name() -> str:
     return os.getenv('JARVIS_CHEAP_MODEL', os.getenv('JARVIS_MODEL', 'kimi'))

+def is_execute_tool_confirm() -> bool:
+    return os.getenv('JARVIS_EXECUTE_TOOL_CONFIRM', 'false') == 'true'
+
 def split_text_into_chunks(text: str, max_length: int = 512) -> List[str]:
     """Split text into chunks with overlapping windows"""
     chunks = []
@@ -778,7 +784,7 @@ def get_context_token_count(text: str) -> int:
         return sum([len(tokenizer.encode(chunk)) for chunk in chunks])

     except Exception as e:
-        PrettyOutput.print(f"Error counting tokens: {str(e)}", OutputType.WARNING)
+        PrettyOutput.print(f"计算token失败: {str(e)}", OutputType.WARNING)
         # Fallback to rough character-based estimate
         return len(text) // 4  # Rough estimate of 4 chars per token
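get_context_token_count, which the new search batching relies on, sums tokenizer counts over fixed-size chunks and falls back to a four-characters-per-token estimate on failure. A minimal sketch of that pattern, with the tokenizer passed in as a callable since its setup is outside this hunk:

    from typing import Callable, List

    def count_tokens(text: str, encode: Callable[[str], List[int]],
                     chunk_size: int = 512) -> int:
        # Split into fixed-size character chunks, then sum token counts
        chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
        try:
            return sum(len(encode(chunk)) for chunk in chunks)
        except Exception:
            return len(text) // 4  # rough estimate of 4 characters per token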