jarvis-ai-assistant 0.3.19__py3-none-any.whl → 0.3.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. jarvis/__init__.py +1 -1
  2. jarvis/jarvis_agent/__init__.py +33 -5
  3. jarvis/jarvis_agent/config_editor.py +5 -1
  4. jarvis/jarvis_agent/edit_file_handler.py +15 -9
  5. jarvis/jarvis_agent/jarvis.py +99 -3
  6. jarvis/jarvis_agent/memory_manager.py +3 -3
  7. jarvis/jarvis_agent/share_manager.py +3 -1
  8. jarvis/jarvis_agent/shell_input_handler.py +17 -2
  9. jarvis/jarvis_agent/task_analyzer.py +0 -1
  10. jarvis/jarvis_agent/task_manager.py +15 -5
  11. jarvis/jarvis_agent/tool_executor.py +2 -2
  12. jarvis/jarvis_code_agent/code_agent.py +39 -16
  13. jarvis/jarvis_git_utils/git_commiter.py +3 -6
  14. jarvis/jarvis_mcp/sse_mcp_client.py +9 -3
  15. jarvis/jarvis_mcp/streamable_mcp_client.py +15 -5
  16. jarvis/jarvis_memory_organizer/memory_organizer.py +1 -1
  17. jarvis/jarvis_methodology/main.py +4 -4
  18. jarvis/jarvis_multi_agent/__init__.py +3 -3
  19. jarvis/jarvis_platform/ai8.py +0 -4
  20. jarvis/jarvis_platform/base.py +12 -7
  21. jarvis/jarvis_platform/kimi.py +18 -6
  22. jarvis/jarvis_platform/tongyi.py +18 -5
  23. jarvis/jarvis_platform/yuanbao.py +10 -3
  24. jarvis/jarvis_platform_manager/main.py +21 -7
  25. jarvis/jarvis_platform_manager/service.py +4 -3
  26. jarvis/jarvis_rag/cli.py +61 -22
  27. jarvis/jarvis_rag/embedding_manager.py +10 -3
  28. jarvis/jarvis_rag/llm_interface.py +4 -1
  29. jarvis/jarvis_rag/query_rewriter.py +3 -1
  30. jarvis/jarvis_rag/rag_pipeline.py +11 -3
  31. jarvis/jarvis_rag/retriever.py +151 -2
  32. jarvis/jarvis_smart_shell/main.py +60 -19
  33. jarvis/jarvis_stats/cli.py +12 -9
  34. jarvis/jarvis_stats/stats.py +17 -11
  35. jarvis/jarvis_stats/storage.py +23 -6
  36. jarvis/jarvis_tools/cli/main.py +63 -29
  37. jarvis/jarvis_tools/edit_file.py +3 -4
  38. jarvis/jarvis_tools/file_analyzer.py +0 -1
  39. jarvis/jarvis_tools/generate_new_tool.py +3 -3
  40. jarvis/jarvis_tools/read_code.py +0 -1
  41. jarvis/jarvis_tools/read_webpage.py +14 -4
  42. jarvis/jarvis_tools/registry.py +0 -3
  43. jarvis/jarvis_tools/retrieve_memory.py +0 -1
  44. jarvis/jarvis_tools/save_memory.py +0 -1
  45. jarvis/jarvis_tools/search_web.py +0 -2
  46. jarvis/jarvis_tools/sub_agent.py +197 -0
  47. jarvis/jarvis_tools/sub_code_agent.py +194 -0
  48. jarvis/jarvis_tools/virtual_tty.py +21 -13
  49. jarvis/jarvis_utils/clipboard.py +1 -1
  50. jarvis/jarvis_utils/config.py +35 -5
  51. jarvis/jarvis_utils/input.py +528 -41
  52. jarvis/jarvis_utils/methodology.py +3 -1
  53. jarvis/jarvis_utils/output.py +218 -129
  54. jarvis/jarvis_utils/utils.py +480 -170
  55. {jarvis_ai_assistant-0.3.19.dist-info → jarvis_ai_assistant-0.3.21.dist-info}/METADATA +10 -2
  56. {jarvis_ai_assistant-0.3.19.dist-info → jarvis_ai_assistant-0.3.21.dist-info}/RECORD +60 -58
  57. {jarvis_ai_assistant-0.3.19.dist-info → jarvis_ai_assistant-0.3.21.dist-info}/WHEEL +0 -0
  58. {jarvis_ai_assistant-0.3.19.dist-info → jarvis_ai_assistant-0.3.21.dist-info}/entry_points.txt +0 -0
  59. {jarvis_ai_assistant-0.3.19.dist-info → jarvis_ai_assistant-0.3.21.dist-info}/licenses/LICENSE +0 -0
  60. {jarvis_ai_assistant-0.3.19.dist-info → jarvis_ai_assistant-0.3.21.dist-info}/top_level.txt +0 -0
@@ -163,7 +163,7 @@ class MemoryOrganizer:
  )
  all_tags.update(memory.get("tags", []))

- memory_contents_str = (("="*50) + "\n").join(memory_contents)
+ memory_contents_str = (("=" * 50) + "\n").join(memory_contents)

  prompt = f"""请将以下{len(memories)}个相关记忆合并成一个综合性的记忆。

@@ -67,9 +67,7 @@ def import_methodology(


  @app.command("export")
- def export_methodology(
- output_file: str = typer.Argument(..., help="导出文件路径")
- ):
+ def export_methodology(output_file: str = typer.Argument(..., help="导出文件路径")):
  """导出当前方法论到单个文件"""
  try:
  methodologies = _load_all_methodologies()
@@ -211,7 +209,9 @@ def extract_methodology(


  @app.command("extract-url")
- def extract_methodology_from_url(url: str = typer.Argument(..., help="要提取方法论的URL")):
+ def extract_methodology_from_url(
+ url: str = typer.Argument(..., help="要提取方法论的URL")
+ ):
  """从URL提取方法论"""
  try:
  # 获取平台实例
@@ -115,9 +115,9 @@ content: |2

  if name != self.main_agent_name and self.original_question:
  system_prompt = config.get("system_prompt", "")
- config[
- "system_prompt"
- ] = f"{system_prompt}\n\n# 原始问题\n{self.original_question}"
+ config["system_prompt"] = (
+ f"{system_prompt}\n\n# 原始问题\n{self.original_question}"
+ )

  output_handler = config.get("output_handler", [])
  if len(output_handler) == 0:
@@ -50,10 +50,6 @@ class AI8Model(BasePlatform):
  }

  self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-chat"
- if self.model_name not in self.get_available_models():
- PrettyOutput.print(
- f"警告: 选择的模型 {self.model_name} 不在可用列表中", OutputType.WARNING
- )

  def set_model_name(self, model_name: str):
  """Set model name"""
@@ -84,7 +84,9 @@ class BasePlatform(ABC):
  ) # 留出一些余量
  min_chunk_size = get_max_input_token_count(self.model_group) - 2048
  inputs = split_text_into_chunks(message, max_chunk_size, min_chunk_size)
- PrettyOutput.print(f"长上下文,分批提交,共{len(inputs)}部分...", OutputType.INFO)
+ PrettyOutput.print(
+ f"长上下文,分批提交,共{len(inputs)}部分...", OutputType.INFO
+ )
  prefix_prompt = f"""
  我将分多次提供大量内容,在我明确告诉你内容已经全部提供完毕之前,每次仅需要输出"已收到",明白请输出"开始接收输入"。
  """
@@ -96,7 +98,6 @@ class BasePlatform(ABC):
  submit_count += 1
  length += len(input)

-
  response += "\n"
  for trunk in while_true(
  lambda: while_success(
@@ -109,7 +110,6 @@ class BasePlatform(ABC):
  ):
  response += trunk

-
  PrettyOutput.print("提交完成", OutputType.SUCCESS)
  response += "\n" + while_true(
  lambda: while_success(
@@ -134,11 +134,13 @@ class BasePlatform(ABC):
  with Live(panel, refresh_per_second=10, transient=False) as live:
  for s in self.chat(message):
  response += s
- if is_immediate_abort() and get_interrupt():
- return response
  text_content.append(s, style="bright_white")
- panel.subtitle = "[yellow]正在回答... (按 Ctrl+C 中断)[/yellow]"
+ panel.subtitle = (
+ "[yellow]正在回答... (按 Ctrl+C 中断)[/yellow]"
+ )
  live.update(panel)
+ if is_immediate_abort() and get_interrupt():
+ return response
  end_time = time.time()
  duration = end_time - start_time
  panel.subtitle = (
@@ -147,7 +149,10 @@ class BasePlatform(ABC):
  live.update(panel)
  else:
  # Print a clear prefix line before streaming model output (non-pretty mode)
- console.print(f"🤖 模型输出 - {self.name()} (按 Ctrl+C 中断)", soft_wrap=False)
+ console.print(
+ f"🤖 模型输出 - {self.name()} (按 Ctrl+C 中断)",
+ soft_wrap=False,
+ )
  for s in self.chat(message):
  console.print(s, end="")
  response += s
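
The -134 hunk above also reorders the streaming loop: the immediate-abort check now runs only after live.update(panel), so the panel reflects the last received chunk before the method returns early. A rough sketch of that stream-into-a-Rich-Live-panel pattern, using a fake token generator and a plain boolean in place of the package's chat() stream and is_immediate_abort()/get_interrupt() helpers:

```python
import time

from rich.live import Live
from rich.panel import Panel
from rich.text import Text


def fake_stream():
    # Hypothetical token source standing in for the platform's chat() generator.
    for word in "streaming tokens into a live panel".split():
        time.sleep(0.1)
        yield word + " "


interrupted = False  # stand-in for is_immediate_abort() and get_interrupt()

text_content = Text()
panel = Panel(text_content, title="模型输出", subtitle="[yellow]正在回答... (按 Ctrl+C 中断)[/yellow]")
response = ""

with Live(panel, refresh_per_second=10, transient=False) as live:
    for token in fake_stream():
        response += token
        text_content.append(token, style="bright_white")
        live.update(panel)
        # Honor the interrupt only after the panel shows the latest token,
        # mirroring the reordering in the hunk above.
        if interrupted:
            break
    panel.subtitle = "[green]完成[/green]"
    live.update(panel)
```

Updating the display before honoring the interrupt means the token that arrived just before the abort is still visible in the panel.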
@@ -194,7 +194,9 @@ class KimiModel(BasePlatform):
  uploaded_files = []
  for index, file_path in enumerate(file_list, 1):
  file_name = os.path.basename(file_path)
- PrettyOutput.print(f"处理文件 [{index}/{len(file_list)}]: {file_name}", OutputType.INFO)
+ PrettyOutput.print(
+ f"处理文件 [{index}/{len(file_list)}]: {file_name}", OutputType.INFO
+ )
  try:
  mime_type, _ = mimetypes.guess_type(file_path)
  action = (
@@ -214,22 +216,32 @@ class KimiModel(BasePlatform):

  # 只有文件需要解析
  if action == "file":
- PrettyOutput.print(f"等待文件解析: {file_name}", OutputType.INFO)
+ PrettyOutput.print(
+ f"等待文件解析: {file_name}", OutputType.INFO
+ )
  if self._wait_for_parse(file_info["id"]):
  uploaded_files.append(file_info)
- PrettyOutput.print(f"文件处理完成: {file_name}", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"文件处理完成: {file_name}", OutputType.SUCCESS
+ )
  else:
- PrettyOutput.print(f"文件解析失败: {file_name}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"文件解析失败: {file_name}", OutputType.ERROR
+ )
  return False
  else:
  uploaded_files.append(file_info)
- PrettyOutput.print(f"图片处理完成: {file_name}", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"图片处理完成: {file_name}", OutputType.SUCCESS
+ )
  else:
  PrettyOutput.print(f"文件上传失败: {file_name}", OutputType.ERROR)
  return False

  except Exception as e:
- PrettyOutput.print(f"处理文件出错 {file_path}: {str(e)}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"处理文件出错 {file_path}: {str(e)}", OutputType.ERROR
+ )
  return False

  self.uploaded_files = uploaded_files
@@ -308,7 +308,10 @@ class TongyiPlatform(BasePlatform):
  )

  if response.status_code != 200:
- PrettyOutput.print(f"上传失败 {file_name}: HTTP {response.status_code}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"上传失败 {file_name}: HTTP {response.status_code}",
+ OutputType.ERROR,
+ )
  return False

  # Determine file type based on extension
@@ -340,18 +343,26 @@ class TongyiPlatform(BasePlatform):

  response = http.post(url, headers=headers, json=payload)
  if response.status_code != 200:
- PrettyOutput.print(f"获取下载链接失败: HTTP {response.status_code}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"获取下载链接失败: HTTP {response.status_code}",
+ OutputType.ERROR,
+ )
  return False

  result = response.json()
  if not result.get("success"):
- PrettyOutput.print(f"获取下载链接失败: {result.get('errorMsg')}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"获取下载链接失败: {result.get('errorMsg')}",
+ OutputType.ERROR,
+ )
  return False

  # Add files to chat
  self.uploaded_file_info = result.get("data", {}).get("results", [])
  for file_info in self.uploaded_file_info:
- PrettyOutput.print(f"添加文件到对话: {file_name}", OutputType.INFO)
+ PrettyOutput.print(
+ f"添加文件到对话: {file_name}", OutputType.INFO
+ )
  add_url = "https://api.tongyi.com/assistant/api/chat/file/add"
  add_payload = {
  "workSource": "chat",
@@ -394,7 +405,9 @@ class TongyiPlatform(BasePlatform):
  time.sleep(1) # 短暂暂停以便用户看到成功状态

  except Exception as e:
- PrettyOutput.print(f"上传文件 {file_name} 时出错: {str(e)}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"上传文件 {file_name} 时出错: {str(e)}", OutputType.ERROR
+ )
  return False
  return True

@@ -195,7 +195,9 @@ class YuanbaoPlatform(BasePlatform):
  PrettyOutput.print(f"获取上传信息: {file_name}", OutputType.INFO)
  upload_info = self._generate_upload_info(file_name)
  if not upload_info:
- PrettyOutput.print(f"无法获取文件 {file_name} 的上传信息", OutputType.ERROR)
+ PrettyOutput.print(
+ f"无法获取文件 {file_name} 的上传信息", OutputType.ERROR
+ )
  return False

  # 3. Upload the file to COS
@@ -224,14 +226,19 @@ class YuanbaoPlatform(BasePlatform):
  file_metadata["width"] = img.width
  file_metadata["height"] = img.height
  except Exception as e:
- PrettyOutput.print(f"无法获取图片 {file_name} 的尺寸: {str(e)}", OutputType.WARNING)
+ PrettyOutput.print(
+ f"无法获取图片 {file_name} 的尺寸: {str(e)}",
+ OutputType.WARNING,
+ )

  uploaded_files.append(file_metadata)
  PrettyOutput.print(f"文件 {file_name} 上传成功", OutputType.SUCCESS)
  time.sleep(3) # 上传成功后等待3秒

  except Exception as e:
- PrettyOutput.print(f"上传文件 {file_path} 时出错: {str(e)}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"上传文件 {file_path} 时出错: {str(e)}", OutputType.ERROR
+ )
  return False

  self.multimedia = uploaded_files
@@ -26,7 +26,9 @@ app = typer.Typer(help="Jarvis AI 平台")

  @app.command("info")
  def list_platforms(
- platform: Optional[str] = typer.Option(None, "--platform", "-p", help="指定要查看的平台")
+ platform: Optional[str] = typer.Option(
+ None, "--platform", "-p", help="指定要查看的平台"
+ )
  ) -> None:
  """列出所有支持的平台和模型,或指定平台的详细信息。"""
  registry = PlatformRegistry.get_global_platform_registry()
@@ -217,9 +219,13 @@ def chat_with_model(
  for entry in conversation_history:
  file_obj.write(f"{entry['role']}: {entry['content']}\n\n")

- PrettyOutput.print(f"所有对话已保存到 {file_path}", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"所有对话已保存到 {file_path}", OutputType.SUCCESS
+ )
  except Exception as exc:
- PrettyOutput.print(f"保存所有对话失败: {str(exc)}", OutputType.ERROR)
+ PrettyOutput.print(
+ f"保存所有对话失败: {str(exc)}", OutputType.ERROR
+ )
  continue

  # Check if it is a save_session command
@@ -240,7 +246,9 @@ def chat_with_model(
  file_path = file_path[1:-1]

  if platform.save(file_path):
- PrettyOutput.print(f"会话已保存到 {file_path}", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"会话已保存到 {file_path}", OutputType.SUCCESS
+ )
  else:
  PrettyOutput.print("保存会话失败", OutputType.ERROR)
  except Exception as exc:
@@ -266,7 +274,9 @@ def chat_with_model(

  if platform.restore(file_path):
  conversation_history = [] # Clear local history after loading
- PrettyOutput.print(f"会话已从 {file_path} 加载", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"会话已从 {file_path} 加载", OutputType.SUCCESS
+ )
  else:
  PrettyOutput.print("加载会话失败", OutputType.ERROR)
  except Exception as exc:
@@ -346,7 +356,9 @@ def validate_platform_model(platform: Optional[str], model: Optional[str]) -> bo

  @app.command("chat")
  def chat_command(
- platform: Optional[str] = typer.Option(None, "--platform", "-p", help="指定要使用的平台"),
+ platform: Optional[str] = typer.Option(
+ None, "--platform", "-p", help="指定要使用的平台"
+ ),
  model: Optional[str] = typer.Option(None, "--model", "-m", help="指定要使用的模型"),
  llm_type: str = typer.Option(
  "normal",
@@ -429,7 +441,9 @@ def role_command(
  platform: Optional[str] = typer.Option(
  None, "--platform", "-p", help="指定要使用的平台,覆盖角色配置"
  ),
- model: Optional[str] = typer.Option(None, "--model", "-m", help="指定要使用的模型,覆盖角色配置"),
+ model: Optional[str] = typer.Option(
+ None, "--model", "-m", help="指定要使用的模型,覆盖角色配置"
+ ),
  llm_type: Optional[str] = typer.Option(
  None,
  "-t",
@@ -108,8 +108,6 @@ def start_service(
  OutputType.INFO,
  )

- PrettyOutput.print("Available platforms:", OutputType.INFO)
-
  # Platform and model cache
  platform_instances: Dict[str, Any] = {}

@@ -178,7 +176,10 @@ def start_service(
  }
  )
  except Exception as exc:
- print(f"Error getting models for {default_platform}: {str(exc)}")
+ PrettyOutput.print(
+ f"Error getting models for {default_platform}: {str(exc)}",
+ OutputType.ERROR,
+ )

  # Return model list
  return {"object": "list", "data": model_list}
jarvis/jarvis_rag/cli.py CHANGED
@@ -88,7 +88,9 @@ def _create_custom_llm(platform_name: str, model_name: str) -> Optional[LLMInter
  registry = PlatformRegistry.get_global_platform_registry()
  platform_instance = registry.create_platform(platform_name)
  if not platform_instance:
- PrettyOutput.print(f"错误: 平台 '{platform_name}' 未找到。", OutputType.ERROR)
+ PrettyOutput.print(
+ f"错误: 平台 '{platform_name}' 未找到。", OutputType.ERROR
+ )
  return None
  platform_instance.set_model_name(model_name)
  platform_instance.set_suppress_output(True)
@@ -118,10 +120,14 @@ def _load_ragignore_spec() -> Tuple[Optional[pathspec.PathSpec], Optional[Path]]
  with open(ignore_file_to_use, "r", encoding="utf-8") as f:
  patterns = f.read().splitlines()
  spec = pathspec.PathSpec.from_lines("gitwildmatch", patterns)
- PrettyOutput.print(f"加载忽略规则: {ignore_file_to_use}", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"加载忽略规则: {ignore_file_to_use}", OutputType.SUCCESS
+ )
  return spec, project_root_path
  except Exception as e:
- PrettyOutput.print(f"加载 {ignore_file_to_use.name} 文件失败: {e}", OutputType.WARNING)
+ PrettyOutput.print(
+ f"加载 {ignore_file_to_use.name} 文件失败: {e}", OutputType.WARNING
+ )

  return None, None

@@ -147,7 +153,9 @@ def add_documents(
  "-e",
  help="嵌入模型的名称。覆盖全局配置。",
  ),
- db_path: Optional[Path] = typer.Option(None, "--db-path", help="向量数据库的路径。覆盖全局配置。"),
+ db_path: Optional[Path] = typer.Option(
+ None, "--db-path", help="向量数据库的路径。覆盖全局配置。"
+ ),
  batch_size: int = typer.Option(
  500,
  "--batch-size",
@@ -178,7 +186,9 @@ def add_documents(
  if is_likely_text_file(path):
  files_to_process.add(path)
  else:
- PrettyOutput.print(f"跳过可能的二进制文件: {path}", OutputType.WARNING)
+ PrettyOutput.print(
+ f"跳过可能的二进制文件: {path}", OutputType.WARNING
+ )

  if not files_to_process:
  PrettyOutput.print("在指定路径中未找到任何文本文件。", OutputType.WARNING)
@@ -202,14 +212,20 @@ def add_documents(

  ignored_count = initial_count - len(retained_files)
  if ignored_count > 0:
- PrettyOutput.print(f"根据 .ragignore 规则过滤掉 {ignored_count} 个文件。", OutputType.INFO)
+ PrettyOutput.print(
+ f"根据 .ragignore 规则过滤掉 {ignored_count} 个文件。", OutputType.INFO
+ )
  files_to_process = retained_files

  if not files_to_process:
- PrettyOutput.print("所有找到的文本文件都被忽略规则过滤掉了。", OutputType.WARNING)
+ PrettyOutput.print(
+ "所有找到的文本文件都被忽略规则过滤掉了。", OutputType.WARNING
+ )
  return

- PrettyOutput.print(f"发现 {len(files_to_process)} 个独立文件待处理。", OutputType.INFO)
+ PrettyOutput.print(
+ f"发现 {len(files_to_process)} 个独立文件待处理。", OutputType.INFO
+ )

  try:
  pipeline = JarvisRAGPipeline(
@@ -233,23 +249,32 @@ def add_documents(
  loader = TextLoader(str(file_path), encoding="utf-8")

  docs_batch.extend(loader.load())
- PrettyOutput.print(f"已加载: {file_path} (文件 {i + 1}/{total_files})", OutputType.INFO)
+ PrettyOutput.print(
+ f"已加载: {file_path} (文件 {i + 1}/{total_files})", OutputType.INFO
+ )
  except Exception as e:
  PrettyOutput.print(f"加载失败 {file_path}: {e}", OutputType.WARNING)

  # 当批处理已满或是最后一个文件时处理批处理
  if docs_batch and (len(docs_batch) >= batch_size or (i + 1) == total_files):
- PrettyOutput.print(f"正在处理批次,包含 {len(docs_batch)} 个文档...", OutputType.INFO)
+ PrettyOutput.print(
+ f"正在处理批次,包含 {len(docs_batch)} 个文档...", OutputType.INFO
+ )
  pipeline.add_documents(docs_batch)
  total_docs_added += len(docs_batch)
- PrettyOutput.print(f"成功添加 {len(docs_batch)} 个文档。", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"成功添加 {len(docs_batch)} 个文档。", OutputType.SUCCESS
+ )
  docs_batch = [] # 清空批处理

  if total_docs_added == 0:
  PrettyOutput.print("未能成功加载任何文档。", OutputType.ERROR)
  raise typer.Exit(code=1)

- PrettyOutput.print(f"成功将 {total_docs_added} 个文档的内容添加至集合 '{collection_name}'。", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"成功将 {total_docs_added} 个文档的内容添加至集合 '{collection_name}'。",
+ OutputType.SUCCESS,
+ )

  except Exception as e:
  PrettyOutput.print(f"发生严重错误: {e}", OutputType.ERROR)
@@ -264,7 +289,9 @@ def list_documents(
  "-c",
  help="向量数据库中集合的名称。",
  ),
- db_path: Optional[Path] = typer.Option(None, "--db-path", help="向量数据库的路径。覆盖全局配置。"),
+ db_path: Optional[Path] = typer.Option(
+ None, "--db-path", help="向量数据库的路径。覆盖全局配置。"
+ ),
  ):
  """列出指定集合中的所有唯一文档。"""
  try:
@@ -289,10 +316,15 @@ def list_documents(
  sources.add(source)

  if not sources:
- PrettyOutput.print("知识库中没有找到任何带有源信息的文档。", OutputType.INFO)
+ PrettyOutput.print(
+ "知识库中没有找到任何带有源信息的文档。", OutputType.INFO
+ )
  return

- PrettyOutput.print(f"知识库 '{collection_name}' 中共有 {len(sources)} 个独立文档:", OutputType.INFO)
+ PrettyOutput.print(
+ f"知识库 '{collection_name}' 中共有 {len(sources)} 个独立文档:",
+ OutputType.INFO,
+ )
  for i, source in enumerate(sorted(list(sources)), 1):
  PrettyOutput.print(f" {i}. {source}", OutputType.INFO)

@@ -316,7 +348,9 @@ def retrieve(
  "-e",
  help="嵌入模型的名称。覆盖全局配置。",
  ),
- db_path: Optional[Path] = typer.Option(None, "--db-path", help="向量数据库的路径。覆盖全局配置。"),
+ db_path: Optional[Path] = typer.Option(
+ None, "--db-path", help="向量数据库的路径。覆盖全局配置。"
+ ),
  n_results: int = typer.Option(5, "--top-n", help="要检索的文档数量。"),
  ):
  """仅从RAG知识库检索文档并打印结果。"""
@@ -341,16 +375,16 @@ def retrieve(
  PrettyOutput.print("未找到相关文档。", OutputType.INFO)
  return

- PrettyOutput.print(f"成功检索到 {len(retrieved_docs)} 个文档:", OutputType.SUCCESS)
+ PrettyOutput.print(
+ f"成功检索到 {len(retrieved_docs)} 个文档:", OutputType.SUCCESS
+ )
  from jarvis.jarvis_utils.globals import console

  for i, doc in enumerate(retrieved_docs, 1):
  source = doc.metadata.get("source", "未知来源")
  content = doc.page_content
  panel_title = f"文档 {i} | 来源: {source}"
- console.print(
- f"\n[bold magenta]{panel_title}[/bold magenta]"
- )
+ console.print(f"\n[bold magenta]{panel_title}[/bold magenta]")
  console.print(Markdown(f"```\n{content}\n```"))

  except Exception as e:
@@ -373,7 +407,9 @@ def query(
  "-e",
  help="嵌入模型的名称。覆盖全局配置。",
  ),
- db_path: Optional[Path] = typer.Option(None, "--db-path", help="向量数据库的路径。覆盖全局配置。"),
+ db_path: Optional[Path] = typer.Option(
+ None, "--db-path", help="向量数据库的路径。覆盖全局配置。"
+ ),
  platform: Optional[str] = typer.Option(
  None,
  "--platform",
@@ -436,7 +472,10 @@ except ImportError:

  def _check_rag_dependencies():
  if not _RAG_INSTALLED:
- PrettyOutput.print("RAG依赖项未安装。请运行 'pip install \"jarvis-ai-assistant[rag]\"' 来使用此命令。", OutputType.ERROR)
+ PrettyOutput.print(
+ "RAG依赖项未安装。请运行 'pip install \"jarvis-ai-assistant[rag]\"' 来使用此命令。",
+ OutputType.ERROR,
+ )
  raise typer.Exit(code=1)


@@ -24,7 +24,9 @@ class EmbeddingManager:
  """
  self.model_name = model_name

- PrettyOutput.print(f"初始化嵌入管理器, 模型: '{self.model_name}'...", OutputType.INFO)
+ PrettyOutput.print(
+ f"初始化嵌入管理器, 模型: '{self.model_name}'...", OutputType.INFO
+ )

  # 缓存的salt是模型名称,以防止冲突
  self.cache = EmbeddingCache(cache_dir=cache_dir, salt=self.model_name)
@@ -43,8 +45,13 @@ class EmbeddingManager:
  show_progress=True,
  )
  except Exception as e:
- PrettyOutput.print(f"加载嵌入模型 '{self.model_name}' 时出错: {e}", OutputType.ERROR)
- PrettyOutput.print("请确保您已安装 'sentence_transformers' 'torch'。", OutputType.WARNING)
+ PrettyOutput.print(
+ f"加载嵌入模型 '{self.model_name}' 时出错: {e}", OutputType.ERROR
+ )
+ PrettyOutput.print(
+ "请确保您已安装 'sentence_transformers' 和 'torch'。",
+ OutputType.WARNING,
+ )
  raise

  def embed_documents(self, texts: List[str]) -> List[List[float]]:
@@ -103,7 +103,10 @@ class JarvisPlatform_LLM(LLMInterface):
  self.registry = PlatformRegistry.get_global_platform_registry()
  self.platform: BasePlatform = self.registry.get_normal_platform()
  self.platform.set_suppress_output(False) # 确保模型没有控制台输出
- PrettyOutput.print(f"已初始化 Jarvis 平台 LLM,模型: {self.platform.name()}", OutputType.INFO)
+ PrettyOutput.print(
+ f"已初始化 Jarvis 平台 LLM,模型: {self.platform.name()}",
+ OutputType.INFO,
+ )
  except Exception as e:
  PrettyOutput.print(f"初始化 Jarvis 平台 LLM 失败: {e}", OutputType.ERROR)
  raise
@@ -58,7 +58,9 @@ English version of the query
  """
  prompt = self.rewrite_prompt_template.format(query=query)
  PrettyOutput.print(
- "正在将原始查询重写为多个搜索查询...", output_type=OutputType.INFO, timestamp=False
+ "正在将原始查询重写为多个搜索查询...",
+ output_type=OutputType.INFO,
+ timestamp=False,
  )

  import re
@@ -75,7 +75,9 @@ class JarvisRAGPipeline:
  self._reranker: Optional[Reranker] = None
  self._query_rewriter: Optional[QueryRewriter] = None

- PrettyOutput.print("JarvisRAGPipeline 初始化成功 (模型按需加载).", OutputType.SUCCESS)
+ PrettyOutput.print(
+ "JarvisRAGPipeline 初始化成功 (模型按需加载).", OutputType.SUCCESS
+ )

  def _get_embedding_manager(self) -> EmbeddingManager:
  if self._embedding_manager is None:
@@ -209,7 +211,10 @@ class JarvisRAGPipeline:

  # 3. 根据*原始*查询对统一的候选池进行重排
  if self.use_rerank:
- PrettyOutput.print(f"正在对 {len(unique_candidate_docs)} 个候选文档进行重排(基于原始问题)...", OutputType.INFO)
+ PrettyOutput.print(
+ f"正在对 {len(unique_candidate_docs)} 个候选文档进行重排(基于原始问题)...",
+ OutputType.INFO,
+ )
  retrieved_docs = self._get_reranker().rerank(
  query_text, unique_candidate_docs, top_n=n_results
  )
@@ -274,7 +279,10 @@ class JarvisRAGPipeline:

  # 3. 重排
  if self.use_rerank:
- PrettyOutput.print(f"正在对 {len(unique_candidate_docs)} 个候选文档进行重排...", OutputType.INFO)
+ PrettyOutput.print(
+ f"正在对 {len(unique_candidate_docs)} 个候选文档进行重排...",
+ OutputType.INFO,
+ )
  retrieved_docs = self._get_reranker().rerank(
  query_text, unique_candidate_docs, top_n=n_results
  )
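
Both JarvisRAGPipeline hunks above reformat the same step: when use_rerank is enabled, the merged candidate pool is reranked against the original query and only the top n_results documents are kept. A small illustrative sketch of such a rerank step using a sentence-transformers cross-encoder; the model name, the Doc shape, and the function signature are assumptions for the example, not necessarily what the package's Reranker does:

```python
from dataclasses import dataclass
from typing import List

from sentence_transformers import CrossEncoder


@dataclass
class Doc:
    page_content: str
    source: str


def rerank(query: str, candidates: List[Doc], top_n: int = 5) -> List[Doc]:
    # Score every (query, document) pair with a cross-encoder and keep the best top_n.
    model = CrossEncoder("BAAI/bge-reranker-base")  # assumed model choice
    scores = model.predict([(query, doc.page_content) for doc in candidates])
    ranked = sorted(zip(scores, candidates), key=lambda pair: pair[0], reverse=True)
    return [doc for _, doc in ranked[:top_n]]
```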