jarvis-ai-assistant 0.3.22__py3-none-any.whl → 0.3.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. jarvis/__init__.py +1 -1
  2. jarvis/jarvis_agent/__init__.py +96 -13
  3. jarvis/jarvis_agent/agent_manager.py +0 -3
  4. jarvis/jarvis_agent/jarvis.py +2 -17
  5. jarvis/jarvis_agent/main.py +2 -8
  6. jarvis/jarvis_code_agent/code_agent.py +7 -14
  7. jarvis/jarvis_code_analysis/code_review.py +12 -40
  8. jarvis/jarvis_data/config_schema.json +4 -18
  9. jarvis/jarvis_git_utils/git_commiter.py +7 -22
  10. jarvis/jarvis_mcp/sse_mcp_client.py +4 -3
  11. jarvis/jarvis_mcp/streamable_mcp_client.py +9 -8
  12. jarvis/jarvis_memory_organizer/memory_organizer.py +46 -53
  13. jarvis/jarvis_methodology/main.py +4 -2
  14. jarvis/jarvis_platform/base.py +49 -12
  15. jarvis/jarvis_platform/kimi.py +16 -22
  16. jarvis/jarvis_platform/registry.py +7 -14
  17. jarvis/jarvis_platform/tongyi.py +21 -32
  18. jarvis/jarvis_platform/yuanbao.py +15 -17
  19. jarvis/jarvis_platform_manager/main.py +14 -51
  20. jarvis/jarvis_rag/cli.py +14 -13
  21. jarvis/jarvis_rag/embedding_manager.py +18 -6
  22. jarvis/jarvis_rag/llm_interface.py +0 -2
  23. jarvis/jarvis_rag/rag_pipeline.py +26 -15
  24. jarvis/jarvis_rag/retriever.py +33 -25
  25. jarvis/jarvis_tools/cli/main.py +22 -15
  26. jarvis/jarvis_tools/edit_file.py +1 -3
  27. jarvis/jarvis_tools/file_analyzer.py +12 -6
  28. jarvis/jarvis_tools/registry.py +16 -11
  29. jarvis/jarvis_tools/sub_agent.py +1 -1
  30. jarvis/jarvis_tools/sub_code_agent.py +1 -4
  31. jarvis/jarvis_utils/config.py +14 -10
  32. jarvis/jarvis_utils/input.py +6 -3
  33. jarvis/jarvis_utils/methodology.py +11 -6
  34. jarvis/jarvis_utils/utils.py +30 -13
  35. {jarvis_ai_assistant-0.3.22.dist-info → jarvis_ai_assistant-0.3.24.dist-info}/METADATA +10 -3
  36. {jarvis_ai_assistant-0.3.22.dist-info → jarvis_ai_assistant-0.3.24.dist-info}/RECORD +40 -40
  37. {jarvis_ai_assistant-0.3.22.dist-info → jarvis_ai_assistant-0.3.24.dist-info}/WHEEL +0 -0
  38. {jarvis_ai_assistant-0.3.22.dist-info → jarvis_ai_assistant-0.3.24.dist-info}/entry_points.txt +0 -0
  39. {jarvis_ai_assistant-0.3.22.dist-info → jarvis_ai_assistant-0.3.24.dist-info}/licenses/LICENSE +0 -0
  40. {jarvis_ai_assistant-0.3.22.dist-info → jarvis_ai_assistant-0.3.24.dist-info}/top_level.txt +0 -0
jarvis/jarvis_platform_manager/main.py CHANGED
@@ -11,8 +11,6 @@ import typer
 from jarvis.jarvis_utils.config import (
     get_normal_platform_name,
     get_normal_model_name,
-    get_thinking_platform_name,
-    get_thinking_model_name,
 )

 from jarvis.jarvis_platform.registry import PlatformRegistry
@@ -66,7 +64,7 @@ def list_platforms(


 def chat_with_model(
-    platform_name: str, model_name: str, system_prompt: str, llm_type: str = "normal"
+    platform_name: str, model_name: str, system_prompt: str
 ) -> None:
     """与指定平台和模型进行对话。

@@ -74,7 +72,7 @@ def chat_with_model(
         platform_name: 平台名称
         model_name: 模型名称
         system_prompt: 系统提示语
-        llm_type: LLM类型,可选值:'normal'(普通)或 'thinking'(思考模式)
+
     """
     registry = PlatformRegistry.get_global_platform_registry()
     conversation_history: List[Dict[str, str]] = [] # 存储对话记录
@@ -360,32 +358,19 @@ def chat_command(
         None, "--platform", "-p", help="指定要使用的平台"
     ),
     model: Optional[str] = typer.Option(None, "--model", "-m", help="指定要使用的模型"),
-    llm_type: str = typer.Option(
-        "normal",
-        "-t",
-        "--llm-type",
-        help="使用的LLM类型,可选值:'normal'(普通)或 'thinking'(思考模式)",
-    ),
+
     llm_group: Optional[str] = typer.Option(
         None, "-g", "--llm-group", help="使用的模型组,覆盖配置文件中的设置"
     ),
 ) -> None:
     """与指定平台和模型聊天。"""
     # 如果未提供平台或模型参数,则从config获取默认值
-    platform = platform or (
-        get_thinking_platform_name(llm_group)
-        if llm_type == "thinking"
-        else get_normal_platform_name(llm_group)
-    )
-    model = model or (
-        get_thinking_model_name(llm_group)
-        if llm_type == "thinking"
-        else get_normal_model_name(llm_group)
-    )
+    platform = platform or get_normal_platform_name(llm_group)
+    model = model or get_normal_model_name(llm_group)

     if not validate_platform_model(platform, model):
         return
-    chat_with_model(platform, model, "", llm_type)
+    chat_with_model(platform, model, "")


 @app.command("service")
@@ -444,12 +429,7 @@ def role_command(
     model: Optional[str] = typer.Option(
         None, "--model", "-m", help="指定要使用的模型,覆盖角色配置"
     ),
-    llm_type: Optional[str] = typer.Option(
-        None,
-        "-t",
-        "--llm-type",
-        help="使用的LLM类型,可选值:'normal'(普通)或 'thinking'(思考模式),覆盖角色配置",
-    ),
+
     llm_group: Optional[str] = typer.Option(
         None, "-g", "--llm-group", help="使用的模型组,覆盖配置文件中的设置"
     ),
@@ -483,54 +463,37 @@ def role_command(
         PrettyOutput.print("无效的选择", OutputType.ERROR)
         return

-    # 获取llm_type,优先使用命令行参数,否则使用角色配置,默认为normal
-    role_llm_type = llm_type or selected_role.get("llm_type", "normal")
+

     # 初始化平台和模型
     # 如果提供了platform或model参数,优先使用命令行参数
-    # 否则,如果提供了llm_group,根据llm_type从配置中获取
+    # 否则,如果提供了 llm_group,则从配置中获取
     # 最后才使用角色配置中的platform和model
     if platform:
         platform_name = platform
     elif llm_group:
-        platform_name = (
-            get_thinking_platform_name(llm_group)
-            if role_llm_type == "thinking"
-            else get_normal_platform_name(llm_group)
-        )
+        platform_name = get_normal_platform_name(llm_group)
     else:
         platform_name = selected_role.get("platform")
         if not platform_name:
             # 如果角色配置中没有platform,使用默认配置
-            platform_name = (
-                get_thinking_platform_name()
-                if role_llm_type == "thinking"
-                else get_normal_platform_name()
-            )
+            platform_name = get_normal_platform_name()

     if model:
         model_name = model
     elif llm_group:
-        model_name = (
-            get_thinking_model_name(llm_group)
-            if role_llm_type == "thinking"
-            else get_normal_model_name(llm_group)
-        )
+        model_name = get_normal_model_name(llm_group)
     else:
         model_name = selected_role.get("model")
         if not model_name:
             # 如果角色配置中没有model,使用默认配置
-            model_name = (
-                get_thinking_model_name()
-                if role_llm_type == "thinking"
-                else get_normal_model_name()
-            )
+            model_name = get_normal_model_name()

     system_prompt = selected_role.get("system_prompt", "")

     # 开始对话
     PrettyOutput.print(f"已选择角色: {selected_role['name']}", OutputType.SUCCESS)
-    chat_with_model(platform_name, model_name, system_prompt, role_llm_type)
+    chat_with_model(platform_name, model_name, system_prompt)


 def main() -> None:
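Note: with the thinking/normal split removed, both the chat and role commands resolve the platform and model through the normal-mode config helpers only. A minimal sketch of that resolution order, reusing the helper names visible in the hunks above (the surrounding Typer wiring is omitted and this helper function is hypothetical):

from typing import Optional, Tuple

from jarvis.jarvis_utils.config import (
    get_normal_model_name,
    get_normal_platform_name,
)


def resolve_platform_and_model(
    platform: Optional[str],
    model: Optional[str],
    llm_group: Optional[str] = None,
) -> Tuple[str, str]:
    # CLI arguments win; otherwise fall back to the configured defaults
    # for the (optional) model group.
    platform = platform or get_normal_platform_name(llm_group)
    model = model or get_normal_model_name(llm_group)
    return platform, model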
jarvis/jarvis_rag/cli.py CHANGED
@@ -240,6 +240,7 @@ def add_documents(

     sorted_files = sorted(list(files_to_process))
     total_files = len(sorted_files)
+    loaded_msgs: List[str] = []

     for i, file_path in enumerate(sorted_files):
         try:
@@ -249,14 +250,15 @@ def add_documents(
                 loader = TextLoader(str(file_path), encoding="utf-8")

             docs_batch.extend(loader.load())
-            PrettyOutput.print(
-                f"已加载: {file_path} (文件 {i + 1}/{total_files})", OutputType.INFO
-            )
+            loaded_msgs.append(f"已加载: {file_path} (文件 {i + 1}/{total_files})")
         except Exception as e:
             PrettyOutput.print(f"加载失败 {file_path}: {e}", OutputType.WARNING)

         # 当批处理已满或是最后一个文件时处理批处理
         if docs_batch and (len(docs_batch) >= batch_size or (i + 1) == total_files):
+            if loaded_msgs:
+                PrettyOutput.print("\n".join(loaded_msgs), OutputType.INFO)
+                loaded_msgs = []
             PrettyOutput.print(
                 f"正在处理批次,包含 {len(docs_batch)} 个文档...", OutputType.INFO
             )
@@ -267,6 +269,10 @@ def add_documents(
             )
             docs_batch = [] # 清空批处理

+    # 最后统一打印可能残留的“已加载”信息
+    if loaded_msgs:
+        PrettyOutput.print("\n".join(loaded_msgs), OutputType.INFO)
+        loaded_msgs = []
     if total_docs_added == 0:
         PrettyOutput.print("未能成功加载任何文档。", OutputType.ERROR)
         raise typer.Exit(code=1)
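Note: the add_documents change above buffers the per-file “已加载” messages and emits them in a single PrettyOutput call whenever a batch is flushed, instead of printing one framed line per file. A stripped-down, self-contained sketch of that buffering pattern (plain print stands in for PrettyOutput.print; the loader and indexing steps are elided):

from typing import List


def load_in_batches(files: List[str], batch_size: int = 8) -> None:
    loaded_msgs: List[str] = []  # messages waiting to be printed
    docs_batch: List[str] = []   # documents waiting to be indexed
    total_files = len(files)
    for i, file_path in enumerate(files):
        docs_batch.append(file_path)
        loaded_msgs.append(f"已加载: {file_path} (文件 {i + 1}/{total_files})")
        if docs_batch and (len(docs_batch) >= batch_size or (i + 1) == total_files):
            if loaded_msgs:
                print("\n".join(loaded_msgs))  # one message block per batch
                loaded_msgs = []
            # ... process docs_batch here ...
            docs_batch = []
    # flush anything left over, mirroring the final check in the diff
    if loaded_msgs:
        print("\n".join(loaded_msgs))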
@@ -321,12 +327,11 @@ def list_documents(
             )
             return

-        PrettyOutput.print(
-            f"知识库 '{collection_name}' 中共有 {len(sources)} 个独立文档:",
-            OutputType.INFO,
-        )
+        # 避免在循环中逐条打印,先拼接后统一打印
+        lines = [f"知识库 '{collection_name}' 中共有 {len(sources)} 个独立文档:"]
         for i, source in enumerate(sorted(list(sources)), 1):
-            PrettyOutput.print(f" {i}. {source}", OutputType.INFO)
+            lines.append(f" {i}. {source}")
+        PrettyOutput.print("\n".join(lines), OutputType.INFO)

     except Exception as e:
         PrettyOutput.print(f"发生错误: {e}", OutputType.ERROR)
@@ -450,11 +455,7 @@ def query(
         PrettyOutput.print(f"正在查询: '{question}'", OutputType.INFO)
         answer = pipeline.query(question)

-        PrettyOutput.print("答案:", OutputType.INFO)
-        # 我们仍然可以使用 rich.markdown.Markdown,因为 PrettyOutput 底层使用了 rich
-        from jarvis.jarvis_utils.globals import console
-
-        console.print(Markdown(answer))
+        PrettyOutput.print(answer, OutputType.SUCCESS)

     except Exception as e:
         PrettyOutput.print(f"发生错误: {e}", OutputType.ERROR)
@@ -38,12 +38,24 @@ class EmbeddingManager:
         encode_kwargs = {"normalize_embeddings": True}

         try:
-            return HuggingFaceEmbeddings(
-                model_name=self.model_name,
-                model_kwargs=model_kwargs,
-                encode_kwargs=encode_kwargs,
-                show_progress=True,
-            )
+            # First try to load model locally
+            try:
+                from sentence_transformers import SentenceTransformer
+                local_model = SentenceTransformer(self.model_name, device=model_kwargs["device"])
+                return HuggingFaceEmbeddings(
+                    client=local_model,
+                    model_name=self.model_name,
+                    model_kwargs=model_kwargs,
+                    encode_kwargs=encode_kwargs,
+                )
+            except Exception:
+                # Fall back to remote download if local loading fails
+                return HuggingFaceEmbeddings(
+                    model_name=self.model_name,
+                    model_kwargs=model_kwargs,
+                    encode_kwargs=encode_kwargs,
+                    show_progress=True,
+                )
         except Exception as e:
             PrettyOutput.print(
                 f"加载嵌入模型 '{self.model_name}' 时出错: {e}", OutputType.ERROR
@@ -47,13 +47,11 @@ class ToolAgent_LLM(LLMInterface):
         # 为代理提供一个通用的系统提示
         self.system_prompt = "You are a helpful assistant. Please answer the user's question based on the provided context. You can use tools to find more information if needed."
         self.summary_prompt = """
-<report>
 请为本次问答任务生成一个总结报告,包含以下内容:

 1. **原始问题**: 重述用户最开始提出的问题。
 2. **关键信息来源**: 总结你是基于哪些关键信息或文件得出的结论。
 3. **最终答案**: 给出最终的、精炼的回答。
-</report>
 """

     def generate(self, prompt: str, **kwargs) -> str:
jarvis/jarvis_rag/rag_pipeline.py CHANGED
@@ -161,19 +161,24 @@ class JarvisRAGPipeline:
             if not changed and not deleted:
                 return
             # 打印摘要
-            PrettyOutput.print(
-                f"检测到索引可能不一致:变更 {len(changed)} 个,删除 {len(deleted)} 个。",
-                OutputType.WARNING,
-            )
-            for p in (changed[:3] if changed else []):
-                PrettyOutput.print(f" 变更: {p}", OutputType.WARNING)
-            for p in (deleted[:3] if deleted else []):
-                PrettyOutput.print(f" 删除: {p}", OutputType.WARNING)
+            # 先拼接列表信息再统一打印,避免循环中逐条打印
+            lines = [
+                f"检测到索引可能不一致:变更 {len(changed)} 个,删除 {len(deleted)} 个。"
+            ]
+            if changed:
+                lines.extend([f" 变更: {p}" for p in changed[:3]])
+            if deleted:
+                lines.extend([f" 删除: {p}" for p in deleted[:3]])
+            PrettyOutput.print("\n".join(lines), OutputType.WARNING)
             # 询问用户
-            if get_yes_no("检测到索引变更,是否现在更新索引后再开始检索?", default=True):
+            if get_yes_no(
+                "检测到索引变更,是否现在更新索引后再开始检索?", default=True
+            ):
                 retriever.update_index_for_changes(changed, deleted)
             else:
-                PrettyOutput.print("已跳过索引更新,将直接使用当前索引进行检索。", OutputType.INFO)
+                PrettyOutput.print(
+                    "已跳过索引更新,将直接使用当前索引进行检索。", OutputType.INFO
+                )
         except Exception as e:
             PrettyOutput.print(f"检索前索引检查失败:{e}", OutputType.WARNING)
@@ -228,9 +233,12 @@ class JarvisRAGPipeline:
         rewritten_queries = self._get_query_rewriter().rewrite(query_text)

         # 2. 为每个重写的查询检索初始候选文档
+        PrettyOutput.print(
+            "将为以下查询变体进行混合检索:\n" + "\n".join([f" - {q}" for q in rewritten_queries]),
+            OutputType.INFO,
+        )
         all_candidate_docs = []
         for q in rewritten_queries:
-            PrettyOutput.print(f"正在为查询变体 '{q}' 进行混合检索...", OutputType.INFO)
             candidates = self._get_retriever().retrieve(
                 q, n_results=n_results * 2, use_bm25=self.use_bm25
             )
@@ -269,9 +277,9 @@ class JarvisRAGPipeline:
             )
         )
         if sources:
-            PrettyOutput.print("根据以下文档回答:", OutputType.INFO)
-            for source in sources:
-                PrettyOutput.print(f" - {source}", OutputType.INFO)
+            # 合并来源列表后一次性打印,避免多次加框
+            lines = ["根据以下文档回答:"] + [f" - {source}" for source in sources]
+            PrettyOutput.print("\n".join(lines), OutputType.INFO)

         # 4. 创建最终提示并生成答案
         # 我们使用原始的query_text作为给LLM的最终提示
@@ -299,9 +307,12 @@ class JarvisRAGPipeline:
         rewritten_queries = self._get_query_rewriter().rewrite(query_text)

         # 2. 检索候选文档
+        PrettyOutput.print(
+            "将为以下查询变体进行混合检索:\n" + "\n".join([f" - {q}" for q in rewritten_queries]),
+            OutputType.INFO,
+        )
         all_candidate_docs = []
         for q in rewritten_queries:
-            PrettyOutput.print(f"正在为查询变体 '{q}' 进行混合检索...", OutputType.INFO)
             candidates = self._get_retriever().retrieve(
                 q, n_results=n_results * 2, use_bm25=self.use_bm25
             )
jarvis/jarvis_rag/retriever.py CHANGED
@@ -184,32 +184,26 @@ class ChromaRetriever:
         deleted = result["deleted"]
         if not changed and not deleted:
             return
+        # 为避免在循环中逐条打印,先拼接后统一打印
+        lines: list[str] = []
         if changed:
-            PrettyOutput.print(
-                f"检测到 {len(changed)} 个已索引文件发生变化,建议重新索引以保证检索准确性。",
-                OutputType.WARNING,
+            lines.append(
+                f"检测到 {len(changed)} 个已索引文件发生变化,建议重新索引以保证检索准确性。"
             )
-            for p in changed[:5]:
-                PrettyOutput.print(f" 变更: {p}", OutputType.WARNING)
+            lines.extend([f" 变更: {p}" for p in changed[:5]])
             if len(changed) > 5:
-                PrettyOutput.print(
-                    f" ... 以及另外 {len(changed) - 5} 个文件", OutputType.WARNING
-                )
+                lines.append(f" ... 以及另外 {len(changed) - 5} 个文件")
         if deleted:
-            PrettyOutput.print(
-                f"检测到 {len(deleted)} 个已索引文件已被删除,建议清理并重新索引。",
-                OutputType.WARNING,
+            lines.append(
+                f"检测到 {len(deleted)} 个已索引文件已被删除,建议清理并重新索引。"
             )
-            for p in deleted[:5]:
-                PrettyOutput.print(f" 删除: {p}", OutputType.WARNING)
+            lines.extend([f" 删除: {p}" for p in deleted[:5]])
             if len(deleted) > 5:
-                PrettyOutput.print(
-                    f" ... 以及另外 {len(deleted) - 5} 个文件", OutputType.WARNING
-                )
-        PrettyOutput.print(
-            "提示:请使用 'jarvis-rag add <路径>' 重新索引相关文件,以更新向量库与BM25索引。",
-            OutputType.INFO,
+                lines.append(f" ... 以及另外 {len(deleted) - 5} 个文件")
+        lines.append(
+            "提示:请使用 'jarvis-rag add <路径>' 重新索引相关文件,以更新向量库与BM25索引。"
         )
+        PrettyOutput.print("\n".join(lines), OutputType.WARNING)

     def detect_index_changes(self) -> Dict[str, List[str]]:
         """
@@ -231,7 +225,9 @@ class ChromaRetriever:
                 removed += 1
         if removed > 0:
             self._save_manifest(manifest)
-            PrettyOutput.print(f"已从索引清单中移除 {removed} 个已删除的源文件记录。", OutputType.INFO)
+            PrettyOutput.print(
+                f"已从索引清单中移除 {removed} 个已删除的源文件记录。", OutputType.INFO
+            )

     def update_index_for_changes(self, changed: List[str], deleted: List[str]) -> None:
         """
@@ -240,21 +236,29 @@ class ChromaRetriever:
         - 对 changed: 先删除旧条目,再从源文件重建并添加
         - 最后:从集合重建BM25索引,更新manifest
         """
-        changed = list(dict.fromkeys([p for p in (changed or []) if isinstance(p, str)]))
-        deleted = list(dict.fromkeys([p for p in (deleted or []) if isinstance(p, str)]))
+        changed = list(
+            dict.fromkeys([p for p in (changed or []) if isinstance(p, str)])
+        )
+        deleted = list(
+            dict.fromkeys([p for p in (deleted or []) if isinstance(p, str)])
+        )

         if not changed and not deleted:
             return

         # 先处理删除
+        delete_errors: list[str] = []
         for src in deleted:
             try:
                 self.collection.delete(where={"source": src}) # type: ignore[arg-type]
             except Exception as e:
-                PrettyOutput.print(f"删除源 '{src}' 时出错: {e}", OutputType.WARNING)
+                delete_errors.append(f"删除源 '{src}' 时出错: {e}")
+        if delete_errors:
+            PrettyOutput.print("\n".join(delete_errors), OutputType.WARNING)

         # 再处理变更(重建)
         docs_to_add: List[Document] = []
+        rebuild_errors: list[str] = []
         for src in changed:
             try:
                 # 删除旧条目
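Note: update_index_for_changes above de-duplicates the incoming paths with dict.fromkeys (which preserves order and keeps the first occurrence) and defers per-source error reporting to a single warning. A small sketch of those two pieces against a generic Chroma-style collection (the helper names here are illustrative, not part of the package):

from typing import List


def dedupe_paths(paths: List[str]) -> List[str]:
    # dict.fromkeys preserves insertion order, so the first occurrence wins.
    return list(dict.fromkeys(p for p in (paths or []) if isinstance(p, str)))


def delete_sources(collection, deleted: List[str]) -> None:
    errors: List[str] = []
    for src in dedupe_paths(deleted):
        try:
            collection.delete(where={"source": src})
        except Exception as e:
            errors.append(f"删除源 '{src}' 时出错: {e}")
    if errors:
        # report everything once instead of one warning per source
        print("\n".join(errors))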
@@ -265,9 +269,13 @@ class ChromaRetriever:
                 # 读取源文件内容(作为单文档载入,由 add_documents 进行拆分与嵌入)
                 with open(src, "r", encoding="utf-8", errors="ignore") as f:
                     content = f.read()
-                docs_to_add.append(Document(page_content=content, metadata={"source": src}))
+                docs_to_add.append(
+                    Document(page_content=content, metadata={"source": src})
+                )
             except Exception as e:
-                PrettyOutput.print(f"重建源 '{src}' 内容时出错: {e}", OutputType.WARNING)
+                rebuild_errors.append(f"重建源 '{src}' 内容时出错: {e}")
+        if rebuild_errors:
+            PrettyOutput.print("\n".join(rebuild_errors), OutputType.WARNING)

         if docs_to_add:
             try:
jarvis/jarvis_tools/cli/main.py CHANGED
@@ -39,18 +39,22 @@ def list_tools(
         )
     else:
         PrettyOutput.section("可用工具列表", OutputType.SYSTEM)
+        # 为避免 PrettyOutput 对每行加框造成信息稀疏,先拼接字符串再统一打印
+        lines = []
+        import json as _json # local import to ensure available
         for tool in tools:
-            PrettyOutput.print(f"\n{tool['name']}", OutputType.SUCCESS)
-            PrettyOutput.print(f" 描述: {tool['description']}", OutputType.INFO)
+            lines.append(f"\n{tool['name']}")
+            lines.append(f" 描述: {tool['description']}")
             if detailed:
-                PrettyOutput.print(" 参数:", OutputType.INFO)
-                import json as _json # local import to ensure available
-
-                PrettyOutput.print(
-                    _json.dumps(tool["parameters"], ensure_ascii=False, indent=2),
-                    OutputType.CODE,
-                    lang="json",
-                )
+                lines.append(" 参数:")
+                # 使用 Markdown 代码块统一展示参数
+                lines.append("```json")
+                try:
+                    lines.append(_json.dumps(tool["parameters"], ensure_ascii=False, indent=2))
+                except Exception:
+                    lines.append(str(tool.get("parameters")))
+                lines.append("```")
+        PrettyOutput.print("\n".join(lines), OutputType.INFO, lang="markdown")


 @app.command("stat")
@@ -202,15 +206,18 @@ def call_tool(
     missing_params = [p for p in required_params if p not in tool_args]

     if missing_params:
-        PrettyOutput.print(
-            f"错误: 缺少必需参数: {', '.join(missing_params)}", OutputType.ERROR
-        )
-        PrettyOutput.print("\n参数说明:", OutputType.INFO)
+        # 先拼接提示与参数说明,再统一打印,避免循环中逐条打印
         params = tool_obj.parameters.get("properties", {})
+        lines = [
+            f"错误: 缺少必需参数: {', '.join(missing_params)}",
+            "",
+            "参数说明:",
+        ]
         for param_name in required_params:
             param_info = params.get(param_name, {})
             desc = param_info.get("description", "无描述")
-            PrettyOutput.print(f" - {param_name}: {desc}", OutputType.INFO)
+            lines.append(f" - {param_name}: {desc}")
+        PrettyOutput.print("\n".join(lines), OutputType.ERROR)
         raise typer.Exit(code=1)

     result = registry.execute_tool(tool_name, tool_args)
jarvis/jarvis_tools/edit_file.py CHANGED
@@ -146,9 +146,7 @@ class FileSearchReplaceTool:
                     }
                 )
             else:
-                PrettyOutput.print(
-                    f"文件 {file_path} 处理失败", OutputType.ERROR
-                )
+                PrettyOutput.print(f"文件 {file_path} 处理失败", OutputType.ERROR)
                 file_results.append(
                     {
                         "file": file_path,
@@ -30,7 +30,7 @@ class FileAnalyzerTool:

     @staticmethod
     def check() -> bool:
-        return PlatformRegistry().get_thinking_platform().support_upload_files()
+        return PlatformRegistry().get_normal_platform().support_upload_files()

     def execute(self, args: Dict[str, Any]) -> Dict[str, Any]:
         """执行文件分析操作
@@ -45,25 +45,31 @@ class FileAnalyzerTool:
         file_paths = args["file_paths"]
         prompt = args["prompt"]

-        # 验证文件路径
+        # 验证文件路径(先收集不存在的文件,统一打印一次)
         valid_files = []
+        missing_files = []
         for file_path in file_paths:
             if os.path.exists(file_path):
                 valid_files.append(file_path)
             else:
-                PrettyOutput.print(f"文件不存在: {file_path}", OutputType.WARNING)
+                missing_files.append(file_path)
+        if missing_files:
+            PrettyOutput.print(
+                "以下文件不存在:\n" + "\n".join(f" - {p}" for p in missing_files),
+                OutputType.WARNING,
+            )

         if not valid_files:
             return {"success": False, "stdout": "", "stderr": "没有找到有效的文件"}

-        # 创建thinking平台实例
-        platform = PlatformRegistry().get_thinking_platform()
+        # 创建平台实例
+        platform = PlatformRegistry().get_normal_platform()

         if not platform:
             return {
                 "success": False,
                 "stdout": "",
-                "stderr": "无法创建thinking平台实例",
+                "stderr": "无法创建平台实例",
             }

         # 设置系统消息
jarvis/jarvis_tools/registry.py CHANGED
@@ -73,7 +73,7 @@ arguments:

 <string_format>
 # 📝 字符串参数格式
-始终使用 |2 语法表示字符串参数,防止多行字符串行首空格引起歧义:
+使用 |2 语法表示字符串参数,防止多行字符串行首空格引起歧义。

 {ot("TOOL_CALL")}
 want: 当前的git状态,期望获取xxx的提交记录
@@ -81,7 +81,7 @@ name: execute_script

 arguments:
   interpreter: bash
-  script_content: |2
+  script_content: |
     git status --porcelain
 {ct("TOOL_CALL")}
 </string_format>
@@ -98,7 +98,6 @@ arguments:
 <common_errors>
 # ⚠️ 常见错误
 - 同时调用多个工具
-- 字符串参数缺少 |2
 - 假设工具结果
 - 创建虚构对话
 - 在没有所需信息的情况下继续
@@ -276,14 +275,17 @@ class ToolRegistry(OutputHandlerProtocol):
         # 如果配置了 use 列表,只保留列表中的工具
         if use_list:
             filtered_tools = {}
+            missing = []
             for tool_name in use_list:
                 if tool_name in self.tools:
                     filtered_tools[tool_name] = self.tools[tool_name]
                 else:
-                    PrettyOutput.print(
-                        f"警告: 配置的工具 '{tool_name}' 不存在",
-                        OutputType.WARNING,
-                    )
+                    missing.append(tool_name)
+            if missing:
+                PrettyOutput.print(
+                    "警告: 配置的工具不存在: " + ", ".join(f"'{name}'" for name in missing),
+                    OutputType.WARNING,
+                )
             self.tools = filtered_tools

         # 如果配置了 dont_use 列表,排除列表中的工具
@@ -315,14 +317,15 @@ class ToolRegistry(OutputHandlerProtocol):
             )

         # 遍历目录中的所有.yaml文件
+        error_lines = []
         for file_path in mcp_tools_dir.glob("*.yaml"):
             try:
                 config = yaml.safe_load(open(file_path, "r", encoding="utf-8"))
                 self.register_mcp_tool_by_config(config)
             except Exception as e:
-                PrettyOutput.print(
-                    f"文件 {file_path} 加载失败: {str(e)}", OutputType.WARNING
-                )
+                error_lines.append(f"文件 {file_path} 加载失败: {str(e)}")
+        if error_lines:
+            PrettyOutput.print("\n".join(error_lines), OutputType.WARNING)

     def _load_builtin_tools(self) -> None:
         """从内置工具目录加载工具"""
@@ -613,7 +616,9 @@ class ToolRegistry(OutputHandlerProtocol):
         )

     @staticmethod
-    def _extract_tool_calls(content: str) -> Tuple[Dict[str, Dict[str, Any]], str, bool]:
+    def _extract_tool_calls(
+        content: str,
+    ) -> Tuple[Dict[str, Dict[str, Any]], str, bool]:
         """从内容中提取工具调用。

         参数:
jarvis/jarvis_tools/sub_agent.py CHANGED
@@ -133,7 +133,7 @@ class SubAgentTool:
             system_prompt=system_prompt,
             name="SubAgent",
             description="Temporary sub agent for executing a subtask",
-            llm_type="normal", # 使用默认模型类型
+
             model_group=model_group, # 继承父Agent模型组(如可用)
             summary_prompt=summary_prompt, # 继承父Agent总结提示词(如可用)
             auto_complete=auto_complete,
jarvis/jarvis_tools/sub_code_agent.py CHANGED
@@ -103,12 +103,10 @@ class SubCodeAgentTool:
             pass

         # 创建 CodeAgent:参数优先使用父Agent的配置(若可获取),否则使用默认
-        # 推断/继承 llm_type、need_summary、tool_group
-        llm_type = "normal"
+        # 推断/继承 tool_group
         tool_group = None
         try:
             if parent_agent is not None:
-                llm_type = getattr(parent_agent, "llm_type", llm_type)
                 tool_group = getattr(parent_agent, "tool_group", tool_group)
         except Exception:
             pass
@@ -135,7 +133,6 @@ class SubCodeAgentTool:

         try:
             code_agent = CodeAgent(
-                llm_type=llm_type,
                 model_group=model_group,
                 need_summary=True,
                 append_tools=append_tools,