jarvis-ai-assistant 0.3.17__py3-none-any.whl → 0.3.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. jarvis/__init__.py +1 -1
  2. jarvis/jarvis_agent/__init__.py +23 -10
  3. jarvis/jarvis_agent/edit_file_handler.py +8 -13
  4. jarvis/jarvis_agent/jarvis.py +13 -3
  5. jarvis/jarvis_agent/memory_manager.py +4 -4
  6. jarvis/jarvis_agent/methodology_share_manager.py +2 -2
  7. jarvis/jarvis_agent/task_analyzer.py +4 -3
  8. jarvis/jarvis_agent/task_manager.py +6 -6
  9. jarvis/jarvis_agent/tool_executor.py +2 -2
  10. jarvis/jarvis_agent/tool_share_manager.py +2 -2
  11. jarvis/jarvis_code_agent/code_agent.py +21 -29
  12. jarvis/jarvis_code_analysis/code_review.py +2 -4
  13. jarvis/jarvis_data/config_schema.json +5 -0
  14. jarvis/jarvis_git_utils/git_commiter.py +17 -18
  15. jarvis/jarvis_methodology/main.py +12 -12
  16. jarvis/jarvis_platform/base.py +21 -13
  17. jarvis/jarvis_platform/kimi.py +13 -13
  18. jarvis/jarvis_platform/tongyi.py +17 -15
  19. jarvis/jarvis_platform/yuanbao.py +11 -11
  20. jarvis/jarvis_platform_manager/main.py +12 -22
  21. jarvis/jarvis_rag/cli.py +36 -32
  22. jarvis/jarvis_rag/embedding_manager.py +11 -6
  23. jarvis/jarvis_rag/llm_interface.py +6 -5
  24. jarvis/jarvis_rag/rag_pipeline.py +9 -8
  25. jarvis/jarvis_rag/reranker.py +3 -2
  26. jarvis/jarvis_rag/retriever.py +18 -8
  27. jarvis/jarvis_smart_shell/main.py +306 -46
  28. jarvis/jarvis_stats/stats.py +40 -0
  29. jarvis/jarvis_stats/storage.py +220 -9
  30. jarvis/jarvis_tools/clear_memory.py +0 -11
  31. jarvis/jarvis_tools/cli/main.py +18 -17
  32. jarvis/jarvis_tools/edit_file.py +4 -4
  33. jarvis/jarvis_tools/execute_script.py +5 -1
  34. jarvis/jarvis_tools/file_analyzer.py +6 -6
  35. jarvis/jarvis_tools/generate_new_tool.py +6 -17
  36. jarvis/jarvis_tools/read_code.py +3 -6
  37. jarvis/jarvis_tools/read_webpage.py +74 -13
  38. jarvis/jarvis_tools/registry.py +8 -28
  39. jarvis/jarvis_tools/retrieve_memory.py +5 -16
  40. jarvis/jarvis_tools/rewrite_file.py +0 -4
  41. jarvis/jarvis_tools/save_memory.py +2 -10
  42. jarvis/jarvis_tools/search_web.py +5 -8
  43. jarvis/jarvis_tools/virtual_tty.py +22 -40
  44. jarvis/jarvis_utils/clipboard.py +3 -3
  45. jarvis/jarvis_utils/config.py +8 -0
  46. jarvis/jarvis_utils/input.py +67 -27
  47. jarvis/jarvis_utils/methodology.py +3 -3
  48. jarvis/jarvis_utils/output.py +1 -7
  49. jarvis/jarvis_utils/utils.py +44 -58
  50. {jarvis_ai_assistant-0.3.17.dist-info → jarvis_ai_assistant-0.3.19.dist-info}/METADATA +1 -1
  51. {jarvis_ai_assistant-0.3.17.dist-info → jarvis_ai_assistant-0.3.19.dist-info}/RECORD +55 -55
  52. {jarvis_ai_assistant-0.3.17.dist-info → jarvis_ai_assistant-0.3.19.dist-info}/WHEEL +0 -0
  53. {jarvis_ai_assistant-0.3.17.dist-info → jarvis_ai_assistant-0.3.19.dist-info}/entry_points.txt +0 -0
  54. {jarvis_ai_assistant-0.3.17.dist-info → jarvis_ai_assistant-0.3.19.dist-info}/licenses/LICENSE +0 -0
  55. {jarvis_ai_assistant-0.3.17.dist-info → jarvis_ai_assistant-0.3.19.dist-info}/top_level.txt +0 -0
@@ -35,6 +35,10 @@ class StatsStorage:
35
35
  self.data_dir = self.storage_dir / "data"
36
36
  self.data_dir.mkdir(exist_ok=True)
37
37
 
38
+ # 统计总量缓存目录(每个指标一个文件,内容为统计总量)
39
+ self.totals_dir = self.storage_dir / "totals"
40
+ self.totals_dir.mkdir(exist_ok=True)
41
+
38
42
  # 元数据文件路径
39
43
  self.meta_file = self.storage_dir / "stats_meta.json"
40
44
 
@@ -108,6 +112,31 @@ class StatsStorage:
108
112
  pass
109
113
  raise RuntimeError(f"保存数据失败: {e}") from e
110
114
 
115
+ def _save_text_atomic(self, filepath: Path, text: str):
116
+ """原子性地保存纯文本内容"""
117
+ temp_suffix = f".tmp.{uuid.uuid4().hex[:8]}"
118
+ temp_filepath = filepath.with_suffix(temp_suffix)
119
+ max_retries = 3
120
+ for attempt in range(max_retries):
121
+ try:
122
+ with open(temp_filepath, "w", encoding="utf-8") as f:
123
+ f.write(text)
124
+
125
+ if sys.platform == "win32" and filepath.exists():
126
+ filepath.unlink()
127
+ temp_filepath.rename(filepath)
128
+ return
129
+ except Exception:
130
+ if attempt < max_retries - 1:
131
+ time.sleep(0.1 * (attempt + 1))
132
+ continue
133
+ if temp_filepath.exists():
134
+ try:
135
+ temp_filepath.unlink()
136
+ except OSError:
137
+ pass
138
+ raise
139
+
111
140
  def add_metric(
112
141
  self,
113
142
  metric_name: str,
@@ -141,6 +170,13 @@ class StatsStorage:
141
170
  meta["metrics"][metric_name]["last_updated"] = timestamp.isoformat()
142
171
  if unit and not meta["metrics"][metric_name].get("unit"):
143
172
  meta["metrics"][metric_name]["unit"] = unit
173
+
174
+ # 记录分组信息(如果提供)
175
+ if tags and isinstance(tags, dict):
176
+ group = tags.get("group")
177
+ if group:
178
+ meta["metrics"][metric_name]["group"] = group
179
+
144
180
  self._save_json(self.meta_file, meta)
145
181
 
146
182
  # 获取日期对应的数据文件
@@ -169,6 +205,26 @@ class StatsStorage:
169
205
  # 保存数据到日期文件
170
206
  self._save_json(date_file, data)
171
207
 
208
+ # 更新总量缓存文件(每个指标一个文件,内容为累计统计值)
209
+ try:
210
+ total_file = self._get_total_file(metric_name)
211
+ if total_file.exists():
212
+ # 正常累加
213
+ try:
214
+ with open(total_file, "r", encoding="utf-8") as tf:
215
+ current_total = float(tf.read().strip() or "0")
216
+ except Exception:
217
+ current_total = 0.0
218
+ new_total = current_total + float(value)
219
+ self._save_text_atomic(total_file, str(new_total))
220
+ else:
221
+ # 首次生成:扫描历史数据(包含刚写入的这条记录)并写入
222
+ # 注意:get_metric_total 内部会完成扫描并写入 totals 文件,这里无需再额外写入或累加
223
+ _ = self.get_metric_total(metric_name)
224
+ except Exception:
225
+ # 静默失败,不影响主流程
226
+ pass
227
+
172
228
  def get_metrics(
173
229
  self,
174
230
  metric_name: str,
@@ -236,6 +292,144 @@ class StatsStorage:
236
292
 
237
293
  return results
238
294
 
295
+ def _get_total_file(self, metric_name: str) -> Path:
296
+ """获取某个指标的总量文件路径"""
297
+ return self.totals_dir / metric_name
298
+
299
+ def get_metric_total(self, metric_name: str) -> float:
300
+ """
301
+ 获取某个指标的累计总量。
302
+ - 如果总量缓存文件存在,直接读取
303
+ - 如果不存在,则扫描历史数据计算一次并写入缓存
304
+ """
305
+ total_file = self._get_total_file(metric_name)
306
+ # 优先读取缓存
307
+ if total_file.exists():
308
+ try:
309
+ with open(total_file, "r", encoding="utf-8") as f:
310
+ return float((f.read() or "0").strip() or "0")
311
+ except Exception:
312
+ # 读取失败则重建
313
+ pass
314
+
315
+ # 扫描历史数据进行一次性计算,并尽可能推断分组信息
316
+ total = 0.0
317
+ group_counts: Dict[str, int] = {}
318
+ try:
319
+ for data_file in self.data_dir.glob("stats_*.json"):
320
+ data = self._load_json(data_file)
321
+ metric_data = data.get(metric_name) or {}
322
+ # metric_data: {hour_key: [records]}
323
+ for hour_records in metric_data.values():
324
+ for record in hour_records:
325
+ # 累加数值
326
+ try:
327
+ total += float(record.get("value", 0))
328
+ except Exception:
329
+ pass
330
+ # 统计历史记录中的分组标签
331
+ try:
332
+ tags = record.get("tags", {})
333
+ grp = tags.get("group")
334
+ if grp:
335
+ group_counts[grp] = group_counts.get(grp, 0) + 1
336
+ except Exception:
337
+ pass
338
+
339
+ # 写入缓存
340
+ self._save_text_atomic(total_file, str(total))
341
+
342
+ # 如果元数据中没有该指标或缺少分组,则根据历史数据推断一次
343
+ try:
344
+ meta = self._load_json(self.meta_file)
345
+ if "metrics" not in meta or not isinstance(meta.get("metrics"), dict):
346
+ meta["metrics"] = {}
347
+ info = meta["metrics"].get(metric_name)
348
+ now_iso = datetime.now().isoformat()
349
+ if info is None:
350
+ info = {"unit": None, "created_at": now_iso, "last_updated": now_iso}
351
+ meta["metrics"][metric_name] = info
352
+ if not info.get("group"):
353
+ inferred_group = None
354
+ if group_counts:
355
+ inferred_group = max(group_counts.items(), key=lambda kv: kv[1])[0]
356
+ # 名称启发式作为补充
357
+ if not inferred_group:
358
+ if metric_name.startswith("code_lines_") or "commit" in metric_name:
359
+ inferred_group = "code_agent"
360
+ if inferred_group:
361
+ info["group"] = inferred_group
362
+ # 保存元数据
363
+ self._save_json(self.meta_file, meta)
364
+ except Exception:
365
+ # 分组推断失败不影响总量结果
366
+ pass
367
+
368
+ except Exception:
369
+ # 失败则返回0
370
+ return 0.0
371
+ return total
372
+
373
+ def resolve_metric_group(self, metric_name: str) -> Optional[str]:
374
+ """
375
+ 解析并确保写回某个指标的分组信息:
376
+ - 若元数据已存在group则直接返回
377
+ - 否则扫描历史记录中的tags['group']做多数投票推断
378
+ - 若仍无法得到,则用名称启发式(code_lines_*或包含commit -> code_agent)
379
+ - 推断出group后会写回到元数据,返回推断值;否则返回None
380
+ """
381
+ try:
382
+ # 优先从元数据读取
383
+ meta = self._load_json(self.meta_file)
384
+ metrics_meta = meta.get("metrics", {}) if isinstance(meta.get("metrics"), dict) else {}
385
+ info = metrics_meta.get(metric_name)
386
+ if info and isinstance(info, dict):
387
+ grp = info.get("group")
388
+ if grp:
389
+ return grp
390
+
391
+ # 扫描历史记录以推断
392
+ group_counts: Dict[str, int] = {}
393
+ for data_file in self.data_dir.glob("stats_*.json"):
394
+ data = self._load_json(data_file)
395
+ metric_data = data.get(metric_name) or {}
396
+ for hour_records in metric_data.values():
397
+ for record in hour_records:
398
+ try:
399
+ tags = record.get("tags", {})
400
+ grp = tags.get("group")
401
+ if grp:
402
+ group_counts[grp] = group_counts.get(grp, 0) + 1
403
+ except Exception:
404
+ continue
405
+
406
+ inferred_group: Optional[str] = None
407
+ if group_counts:
408
+ inferred_group = max(group_counts.items(), key=lambda kv: kv[1])[0]
409
+
410
+ # 名称启发式补充
411
+ if not inferred_group:
412
+ name = metric_name or ""
413
+ if name.startswith("code_lines_") or ("commit" in name):
414
+ inferred_group = "code_agent"
415
+
416
+ # 如果推断出了分组,写回元数据
417
+ if inferred_group:
418
+ if not isinstance(metrics_meta, dict):
419
+ meta["metrics"] = {}
420
+ metrics_meta = meta["metrics"]
421
+ if info is None:
422
+ now_iso = datetime.now().isoformat()
423
+ info = {"unit": None, "created_at": now_iso, "last_updated": now_iso}
424
+ metrics_meta[metric_name] = info
425
+ info["group"] = inferred_group
426
+ self._save_json(self.meta_file, meta)
427
+ return inferred_group
428
+
429
+ return None
430
+ except Exception:
431
+ return None
432
+
239
433
  def get_metric_info(self, metric_name: str) -> Optional[Dict]:
240
434
  """获取指标元信息"""
241
435
  meta = self._load_json(self.meta_file)
@@ -246,7 +440,7 @@ class StatsStorage:
246
440
  # 从元数据文件获取指标
247
441
  meta = self._load_json(self.meta_file)
248
442
  metrics_from_meta = set(meta.get("metrics", {}).keys())
249
-
443
+
250
444
  # 扫描所有数据文件获取实际存在的指标
251
445
  metrics_from_data: set[str] = set()
252
446
  for data_file in self.data_dir.glob("stats_*.json"):
@@ -256,9 +450,18 @@ class StatsStorage:
256
450
  except (json.JSONDecodeError, OSError):
257
451
  # 忽略无法读取的文件
258
452
  continue
259
-
260
- # 合并两个来源的指标并返回排序后的列表
261
- all_metrics = metrics_from_meta.union(metrics_from_data)
453
+
454
+ # 扫描总量缓存目录中已有的指标文件
455
+ metrics_from_totals: set[str] = set()
456
+ try:
457
+ for f in self.totals_dir.glob("*"):
458
+ if f.is_file():
459
+ metrics_from_totals.add(f.name)
460
+ except Exception:
461
+ pass
462
+
463
+ # 合并三个来源的指标并返回排序后的列表
464
+ all_metrics = metrics_from_meta.union(metrics_from_data).union(metrics_from_totals)
262
465
  return sorted(list(all_metrics))
263
466
 
264
467
  def aggregate_metrics(
@@ -331,10 +534,10 @@ class StatsStorage:
331
534
  def delete_metric(self, metric_name: str) -> bool:
332
535
  """
333
536
  删除指定的指标及其所有数据
334
-
537
+
335
538
  Args:
336
539
  metric_name: 要删除的指标名称
337
-
540
+
338
541
  Returns:
339
542
  True 如果成功删除,False 如果指标不存在
340
543
  """
@@ -342,11 +545,11 @@ class StatsStorage:
342
545
  meta = self._load_json(self.meta_file)
343
546
  if metric_name not in meta.get("metrics", {}):
344
547
  return False
345
-
548
+
346
549
  # 从元数据中删除指标
347
550
  del meta["metrics"][metric_name]
348
551
  self._save_json(self.meta_file, meta)
349
-
552
+
350
553
  # 遍历所有数据文件,删除该指标的数据
351
554
  for data_file in self.data_dir.glob("stats_*.json"):
352
555
  try:
@@ -362,7 +565,15 @@ class StatsStorage:
362
565
  except Exception:
363
566
  # 忽略单个文件的错误,继续处理其他文件
364
567
  pass
365
-
568
+
569
+ # 删除总量缓存文件
570
+ try:
571
+ total_file = self._get_total_file(metric_name)
572
+ if total_file.exists():
573
+ total_file.unlink()
574
+ except Exception:
575
+ pass
576
+
366
577
  return True
367
578
 
368
579
  def delete_old_data(self, days_to_keep: int = 30):
@@ -210,9 +210,6 @@ class ClearMemoryTool:
210
210
  total_removed += result["removed"]
211
211
 
212
212
  # 生成结果报告
213
- PrettyOutput.print(
214
- f"记忆清除完成,共清除 {total_removed} 条记忆", OutputType.SUCCESS
215
- )
216
213
 
217
214
  # 详细报告
218
215
  report = f"# 记忆清除报告\n\n"
@@ -232,14 +229,6 @@ class ClearMemoryTool:
232
229
  report += f"- 已清除: {result['removed']} 条\n"
233
230
  report += f"- 剩余: {result['total'] - result['removed']} 条\n\n"
234
231
 
235
- # 在终端显示摘要
236
- for memory_type, result in results.items():
237
- if result["removed"] > 0:
238
- PrettyOutput.print(
239
- f"{memory_type}: 清除了 {result['removed']}/{result['total']} 条记忆",
240
- OutputType.INFO,
241
- )
242
-
243
232
  return {
244
233
  "success": True,
245
234
  "stdout": report,
@@ -23,20 +23,21 @@ def list_tools(
23
23
 
24
24
  if as_json:
25
25
  if detailed:
26
- print(json.dumps(tools, indent=2, ensure_ascii=False))
26
+ PrettyOutput.print(json.dumps(tools, indent=2, ensure_ascii=False), OutputType.CODE, lang="json")
27
27
  else:
28
28
  simple_tools = [
29
29
  {"name": t["name"], "description": t["description"]} for t in tools
30
30
  ]
31
- print(json.dumps(simple_tools, indent=2, ensure_ascii=False))
31
+ PrettyOutput.print(json.dumps(simple_tools, indent=2, ensure_ascii=False), OutputType.CODE, lang="json")
32
32
  else:
33
33
  PrettyOutput.section("可用工具列表", OutputType.SYSTEM)
34
34
  for tool in tools:
35
- print(f"\n{tool['name']}")
36
- print(f" 描述: {tool['description']}")
35
+ PrettyOutput.print(f"\n{tool['name']}", OutputType.SUCCESS)
36
+ PrettyOutput.print(f" 描述: {tool['description']}", OutputType.INFO)
37
37
  if detailed:
38
- print(" 参数:")
39
- print(tool["parameters"])
38
+ PrettyOutput.print(" 参数:", OutputType.INFO)
39
+ import json as _json # local import to ensure available
40
+ PrettyOutput.print(_json.dumps(tool["parameters"], ensure_ascii=False, indent=2), OutputType.CODE, lang="json")
40
41
 
41
42
 
42
43
  @app.command("stat")
@@ -63,15 +64,15 @@ def stat_tools(
63
64
  table_data.sort(key=lambda x: x[1], reverse=True)
64
65
 
65
66
  if as_json:
66
- print(json.dumps(dict(table_data), indent=2))
67
+ PrettyOutput.print(json.dumps(dict(table_data), indent=2), OutputType.CODE, lang="json")
67
68
  else:
68
69
  time_desc = f"最近{last_days}天" if last_days else "所有历史"
69
70
  PrettyOutput.section(f"工具调用统计 ({time_desc})", OutputType.SYSTEM)
70
71
  if table_data:
71
- print(tabulate(table_data, headers=["工具名称", "调用次数"], tablefmt="grid"))
72
- print(f"\n总计: {len(table_data)} 个工具被使用,共 {sum(x[1] for x in table_data)} 次调用")
72
+ PrettyOutput.print(tabulate(table_data, headers=["工具名称", "调用次数"], tablefmt="grid"), OutputType.CODE, lang="text")
73
+ PrettyOutput.print(f"\n总计: {len(table_data)} 个工具被使用,共 {sum(x[1] for x in table_data)} 次调用", OutputType.INFO)
73
74
  else:
74
- print("暂无工具调用记录")
75
+ PrettyOutput.print("暂无工具调用记录", OutputType.INFO)
75
76
  else:
76
77
  # 使用 stats 系统的高级功能
77
78
  PrettyOutput.section("工具组统计", OutputType.SYSTEM)
@@ -125,7 +126,7 @@ def stat_tools(
125
126
  tags={"group": "tool"}
126
127
  )
127
128
  else:
128
- print("暂无工具调用记录")
129
+ PrettyOutput.print("暂无工具调用记录", OutputType.INFO)
129
130
 
130
131
 
131
132
  @app.command("call")
@@ -143,7 +144,7 @@ def call_tool(
143
144
  if not tool_obj:
144
145
  PrettyOutput.print(f"错误: 工具 '{tool_name}' 不存在", OutputType.ERROR)
145
146
  available_tools = ", ".join([t["name"] for t in registry.get_all_tools()])
146
- print(f"可用工具: {available_tools}")
147
+ PrettyOutput.print(f"可用工具: {available_tools}", OutputType.INFO)
147
148
  raise typer.Exit(code=1)
148
149
 
149
150
  tool_args = {}
@@ -170,12 +171,12 @@ def call_tool(
170
171
  PrettyOutput.print(
171
172
  f"错误: 缺少必需参数: {', '.join(missing_params)}", OutputType.ERROR
172
173
  )
173
- print("\n参数说明:")
174
+ PrettyOutput.print("\n参数说明:", OutputType.INFO)
174
175
  params = tool_obj.parameters.get("properties", {})
175
176
  for param_name in required_params:
176
177
  param_info = params.get(param_name, {})
177
178
  desc = param_info.get("description", "无描述")
178
- print(f" - {param_name}: {desc}")
179
+ PrettyOutput.print(f" - {param_name}: {desc}", OutputType.INFO)
179
180
  raise typer.Exit(code=1)
180
181
 
181
182
  result = registry.execute_tool(tool_name, tool_args)
@@ -186,12 +187,12 @@ def call_tool(
186
187
  PrettyOutput.section(f"工具 {tool_name} 执行失败", OutputType.ERROR)
187
188
 
188
189
  if result.get("stdout"):
189
- print("\n输出:")
190
- print(result["stdout"])
190
+ PrettyOutput.print("\n输出:", OutputType.INFO)
191
+ PrettyOutput.print(result["stdout"], OutputType.CODE, lang="text")
191
192
 
192
193
  if result.get("stderr"):
193
194
  PrettyOutput.print("\n错误:", OutputType.ERROR)
194
- print(result["stderr"])
195
+ PrettyOutput.print(result["stderr"], OutputType.ERROR, lang="text")
195
196
 
196
197
  if not result["success"]:
197
198
  raise typer.Exit(code=1)
@@ -148,12 +148,12 @@ class FileSearchReplaceTool:
148
148
  content = f.read()
149
149
  original_content = content
150
150
 
151
- print(f"⚙️ 正在处理文件 {file_path}...")
151
+
152
152
  success, temp_content = EditFileHandler._fast_edit(
153
153
  file_path, changes
154
154
  )
155
155
  if not success:
156
- print(f"文件 {file_path} 处理失败")
156
+ PrettyOutput.print(f"文件 {file_path} 处理失败", OutputType.ERROR)
157
157
  file_results.append(
158
158
  {
159
159
  "file": file_path,
@@ -164,7 +164,7 @@ class FileSearchReplaceTool:
164
164
  )
165
165
  continue
166
166
 
167
- print(f"✅ 文件 {file_path} 内容生成完成")
167
+
168
168
 
169
169
  # 只有当所有替换操作都成功时,才写回文件
170
170
  if success and (
@@ -183,7 +183,7 @@ class FileSearchReplaceTool:
183
183
  action = "创建并写入" if not file_exists else "成功修改"
184
184
  stdout_message = f"文件 {file_path} {action} 完成"
185
185
  stdout_messages.append(stdout_message)
186
- PrettyOutput.print(stdout_message, OutputType.SUCCESS)
186
+
187
187
  overall_success = True
188
188
 
189
189
  file_results.append(
@@ -173,4 +173,8 @@ class ScriptTool:
173
173
 
174
174
  if __name__ == "__main__":
175
175
  script_tool = ScriptTool()
176
- print(script_tool.get_display_output("/home/wangmaobin/code/Jarvis/a.txt"))
176
+ PrettyOutput.print(
177
+ script_tool.get_display_output("/home/wangmaobin/code/Jarvis/a.txt"),
178
+ OutputType.CODE,
179
+ lang="text",
180
+ )
@@ -73,19 +73,19 @@ class FileAnalyzerTool:
73
73
  platform.set_system_prompt(system_message)
74
74
 
75
75
  # 上传文件
76
- print(f"📤 正在上传文件...")
76
+
77
77
  try:
78
78
  upload_result = platform.upload_files(valid_files)
79
79
  if not upload_result:
80
- print(f"文件上传失败")
80
+ PrettyOutput.print("文件上传失败", OutputType.ERROR)
81
81
  return {
82
82
  "success": False,
83
83
  "stdout": "",
84
84
  "stderr": "文件上传失败",
85
85
  }
86
- print(f"✅ 文件上传成功")
86
+
87
87
  except Exception as e:
88
- print(f"文件上传失败: {str(e)}")
88
+ PrettyOutput.print(f"文件上传失败: {str(e)}", OutputType.ERROR)
89
89
  return {
90
90
  "success": False,
91
91
  "stdout": "",
@@ -102,9 +102,9 @@ class FileAnalyzerTool:
102
102
  请提供详细的分析结果和理由。"""
103
103
 
104
104
  # 发送请求并获取分析结果
105
- print(f"🔍 正在分析文件...")
105
+
106
106
  analysis_result = platform.chat_until_success(analysis_request)
107
- print(f"✅ 分析完成")
107
+
108
108
 
109
109
  # 清理会话
110
110
  platform.delete_chat()
@@ -123,9 +123,7 @@ class generate_new_tool:
123
123
  tool_registry = agent.get_tool_registry()
124
124
  if tool_registry:
125
125
  # 尝试加载并注册新工具
126
- PrettyOutput.print(
127
- f"正在注册工具 '{tool_name}'...", OutputType.INFO
128
- )
126
+
129
127
  if tool_registry.register_tool_by_file(str(tool_file_path)):
130
128
  success_message += f"\n已成功注册到当前会话的工具注册表中"
131
129
  registration_successful = True
@@ -148,12 +146,7 @@ class generate_new_tool:
148
146
  )
149
147
  success_message += f"\n注册到当前会话失败,可能需要重新启动Jarvis"
150
148
 
151
- PrettyOutput.print(
152
- f"工具 '{tool_name}' 创建"
153
- + ("并注册" if registration_successful else "")
154
- + "成功!",
155
- OutputType.SUCCESS,
156
- )
149
+
157
150
 
158
151
  # 检查并安装缺失的依赖
159
152
  try:
@@ -181,13 +174,11 @@ class generate_new_tool:
181
174
  try:
182
175
  __import__(pkg)
183
176
  except ImportError:
184
- PrettyOutput.print(
185
- f"检测到缺失依赖: {pkg}, 正在尝试安装...", OutputType.INFO
186
- )
177
+
187
178
  import subprocess
188
179
 
189
180
  subprocess.run(["pip", "install", pkg], check=True)
190
- PrettyOutput.print(f"成功安装依赖: {pkg}", OutputType.SUCCESS)
181
+
191
182
  except Exception as e:
192
183
  PrettyOutput.print(f"依赖检查/安装失败: {str(e)}", OutputType.WARNING)
193
184
 
@@ -201,11 +192,9 @@ class generate_new_tool:
201
192
  # 删除已创建的文件
202
193
  if tool_file_path and tool_file_path.exists():
203
194
  try:
204
- PrettyOutput.print(
205
- f"正在删除已创建的文件 {tool_file_path}...", OutputType.INFO
206
- )
195
+
207
196
  tool_file_path.unlink()
208
- PrettyOutput.print(f"文件已删除", OutputType.SUCCESS)
197
+
209
198
  except Exception as delete_error:
210
199
  PrettyOutput.print(
211
200
  f"删除文件失败: {str(delete_error)}", OutputType.ERROR
@@ -44,7 +44,7 @@ class ReadCodeTool:
44
44
  """
45
45
  try:
46
46
  abs_path = os.path.abspath(filepath)
47
- print(f"📖 正在读取文件: {abs_path}...")
47
+
48
48
  # 文件存在性检查
49
49
  if not os.path.exists(abs_path):
50
50
  return {
@@ -69,7 +69,6 @@ class ReadCodeTool:
69
69
 
70
70
  # 处理空文件情况
71
71
  if total_lines == 0:
72
- print(f"✅ 文件读取完成: {abs_path}")
73
72
  return {
74
73
  "success": True,
75
74
  "stdout": f"\n🔍 文件: {abs_path}\n📄 文件为空 (0行)\n",
@@ -93,9 +92,7 @@ class ReadCodeTool:
93
92
  )
94
93
 
95
94
  if start_line > end_line:
96
- print(
97
- f"❌ 无效的行范围 [{start_line}-{end_line}] (总行数: {total_lines})"
98
- )
95
+
99
96
  return {
100
97
  "success": False,
101
98
  "stdout": "",
@@ -117,7 +114,7 @@ class ReadCodeTool:
117
114
  f"📄 原始行号: {start_line}-{end_line} (共{total_lines}行) \n\n"
118
115
  f"{numbered_content}\n\n"
119
116
  )
120
- print(f"✅ 文件读取完成: {abs_path}")
117
+
121
118
 
122
119
  if agent:
123
120
  files = agent.get_user_data("files")