jarvis-ai-assistant 0.1.81__py3-none-any.whl → 0.1.83__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of jarvis-ai-assistant might be problematic.

jarvis/__init__.py CHANGED
@@ -1,3 +1,3 @@
  """Jarvis AI Assistant"""
 
- __version__ = "0.1.81"
+ __version__ = "0.1.83"
jarvis/agent.py CHANGED
@@ -224,7 +224,6 @@ class Agent:
  summary = self.model.chat(self.prompt + "\n" + prompt)
 
  # Clear the current conversation history, but keep the system message
- self.model.delete_chat()
  self.conversation_length = 0 # Reset the conversation length
 
  # Add the summary as the new context
@@ -293,13 +292,12 @@ class Agent:
  return self._call_model(self.prompt)
 
 
- def run(self, user_input: str, file_list: Optional[List[str]] = None, keep_history: bool = False) -> str:
+ def run(self, user_input: str, file_list: Optional[List[str]] = None) -> str:
  """Process the user input and return a response; returns the task summary report
 
  Args:
  user_input: task description entered by the user
  file_list: optional list of files, defaults to None
- keep_history: whether to keep the conversation history, defaults to False
 
  Returns:
  str: task summary report
@@ -320,12 +318,12 @@ class Agent:
  tools_prompt = ""
 
  # Select tools
- PrettyOutput.section("Select tools", OutputType.PLANNING)
+ PrettyOutput.section("Available tools", OutputType.PLANNING)
  tools = self.tool_registry.get_all_tools()
  if tools:
  tools_prompt += "Available tools:\n"
  for tool in tools:
- PrettyOutput.print(f"Selected tool: {tool['name']}", OutputType.INFO)
+ PrettyOutput.print(f"{tool['name']}: {tool['description']}", OutputType.INFO)
  tools_prompt += f"- Name: {tool['name']}\n"
  tools_prompt += f" Description: {tool['description']}\n"
  tools_prompt += f" Parameters: {tool['parameters']}\n"
@@ -440,13 +438,6 @@ arguments:
  PrettyOutput.print(str(e), OutputType.ERROR)
  return f"Task failed: {str(e)}"
 
- finally:
- # Only delete the session when history is not being kept
- if not keep_history:
- try:
- self.model.delete_chat()
- except Exception as e:
- PrettyOutput.print(f"Error while cleaning up the session: {str(e)}", OutputType.ERROR)
 
  def clear_history(self):
  """Clear the conversation history, keeping only the system prompt"""
jarvis/jarvis_codebase/main.py CHANGED
@@ -18,7 +18,7 @@ class CodeBase:
  load_env_from_file()
  self.root_dir = root_dir
  os.chdir(self.root_dir)
- self.thread_count = int(os.environ.get("JARVIS_THREAD_COUNT") or 10)
+ self.thread_count = int(os.environ.get("JARVIS_THREAD_COUNT") or 1)
  self.max_context_length = int(os.getenv("JARVIS_MAX_CONTEXT_LENGTH", 65536))
 
  # Initialize the data directory
@@ -395,12 +395,11 @@ class CodeBase:
  if not initial_results:
  return []
 
- model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
- model.set_suppress_output(True)
+ model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
+ # model.set_suppress_output(True)
 
- try:
- # Build the reranking prompt
- prompt = f"""Based on the user's query, rank the following code files by relevance. Give each file a relevance score from 0 to 100; a higher score means more relevant.
+ # Build the reranking prompt
+ prompt = f"""Based on the user's query, rank the following code files by relevance. Give each file a relevance score from 0 to 100; a higher score means more relevant.
  Only output the score for each file, in the format:
  <RERANK_START>
  file path: score
@@ -411,62 +410,57 @@ class CodeBase:
 
  Files to evaluate:
  """
- for path, _, desc in initial_results:
- prompt += f"""
+ for path, _, desc in initial_results:
+ prompt += f"""
  File: {path}
  Description: {desc}
  ---
  """
-
- response = model.chat(prompt)
-
- # Extract the content between <RERANK_START> and <RERANK_END>
- start_tag = "<RERANK_START>"
- end_tag = "<RERANK_END>"
- if start_tag in response and end_tag in response:
- response = response[response.find(start_tag) + len(start_tag):response.find(end_tag)]
-
- # Parse the response and extract file paths and scores
- scored_results = []
- for line in response.split('\n'):
- if ':' not in line:
- continue
- try:
- file_path, score_str = line.split(':', 1)
- file_path = file_path.strip()
- score = float(score_str.strip()) / 100.0 # Convert to the 0-1 range
- # Keep only results with a relevance of at least 0.7
- if score >= 0.7:
- # Find the matching original description
- desc = next((desc for p, _, desc in initial_results if p == file_path), "")
- scored_results.append((file_path, score, desc))
- except:
- continue
-
- # Sort by score in descending order
- return sorted(scored_results, key=lambda x: x[1], reverse=True)
-
- finally:
- model.delete_chat()
 
- return initial_results
+ response = model.chat(prompt)
+
+ # Extract the content between <RERANK_START> and <RERANK_END>
+ start_tag = "<RERANK_START>"
+ end_tag = "<RERANK_END>"
+ if start_tag in response and end_tag in response:
+ response = response[response.find(start_tag) + len(start_tag):response.find(end_tag)]
+
+ # Parse the response and extract file paths and scores
+ scored_results = []
+ for line in response.split('\n'):
+ if ':' not in line:
+ continue
+ try:
+ file_path, score_str = line.split(':', 1)
+ file_path = file_path.strip()
+ score = float(score_str.strip()) / 100.0 # Convert to the 0-1 range
+ # Keep only results with a relevance of at least 0.7
+ if score >= 0.7:
+ # Find the matching original description
+ desc = next((desc for p, _, desc in initial_results if p == file_path), "")
+ scored_results.append((file_path, score, desc))
+ except:
+ continue
+
+ # Sort by score in descending order
+ return sorted(scored_results, key=lambda x: x[1], reverse=True)
+
 
  def search_similar(self, query: str, top_k: int = 30) -> List[Tuple[str, float, str]]:
  """Search for similar files"""
  model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
  model.set_suppress_output(True)
 
- try:
- prompt = f"""Based on the following query, generate an alternative phrasing with exactly the same meaning. The phrasing will be used for code search, so keep it precise and technical.
+
+ prompt = f"""Based on the following query, generate an alternative phrasing with exactly the same meaning. The phrasing will be used for code search, so keep it precise and technical.
  Original query: {query}
 
  Output the new phrasing directly, without numbering or any other markers.
  """
+
+ query = model.chat(prompt)
 
- query = model.chat(prompt)
-
- finally:
- model.delete_chat()
+
 
  PrettyOutput.print(f"Query: {query}", output_type=OutputType.INFO)
 
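The reranking hunk above asks the model for a score block wrapped in <RERANK_START>/<RERANK_END> tags and keeps only files scoring at least 0.7. A minimal, self-contained sketch of that parsing step, using a made-up model reply (parse_rerank_scores and the sample text are illustrative, not code from the package):

# Hypothetical sketch of the rerank-parsing logic shown above (not part of the package).
def parse_rerank_scores(response: str, threshold: float = 0.7):
    start_tag, end_tag = "<RERANK_START>", "<RERANK_END>"
    if start_tag in response and end_tag in response:
        response = response[response.find(start_tag) + len(start_tag):response.find(end_tag)]
    scored = []
    for line in response.split("\n"):
        if ":" not in line:
            continue
        try:
            path, score_str = line.split(":", 1)
            score = float(score_str.strip()) / 100.0  # normalize 0-100 to 0-1
            if score >= threshold:
                scored.append((path.strip(), score))
        except ValueError:
            continue
    return sorted(scored, key=lambda x: x[1], reverse=True)

# Example with a made-up model reply:
sample = "<RERANK_START>\njarvis/agent.py: 85\njarvis/utils.py: 40\n<RERANK_END>"
print(parse_rerank_scores(sample))  # [('jarvis/agent.py', 0.85)]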
@@ -537,11 +531,8 @@ class CodeBase:
  Answer the user's question in professional language. If the provided file contents are not sufficient to answer the question, tell the user so; never make anything up.
  """
  model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
- try:
- response = model.chat(prompt)
- return response
- finally:
- model.delete_chat()
+ response = model.chat(prompt)
+ return response
 
  def is_index_generated(self) -> bool:
  """Check whether the index has already been generated"""
jarvis/jarvis_coder/main.py CHANGED
@@ -68,10 +68,7 @@ class JarvisCoder:
 
  def _new_model(self):
  """Get the large language model"""
- model = PlatformRegistry().get_global_platform_registry().create_platform(self.platform)
- if self.model:
- model_name = self.model
- model.set_model_name(model_name)
+ model = PlatformRegistry().get_global_platform_registry().get_codegen_platform()
  return model
 
  def _has_uncommitted_files(self) -> bool:
@@ -407,8 +404,7 @@ class JarvisCoder:
  """
 
  # Use the normal model to generate the commit message
- model = PlatformRegistry().get_global_platform_registry().create_platform(self.platform)
- model.set_model_name(self.model)
+ model = PlatformRegistry().get_global_platform_registry().get_codegen_platform()
  model.set_suppress_output(True)
  success, response = self._call_model_with_retry(model, prompt)
  if not success:
@@ -486,9 +482,6 @@ def main():
  parser.add_argument('-l', '--language', help='Programming language', default="python")
  args = parser.parse_args()
 
- # Set the platform
- if not args.platform:
- print("Error: no AI platform specified, please use the -p option")
 
  tool = JarvisCoder(args.dir, args.language)
 
jarvis/jarvis_rag/main.py CHANGED
@@ -141,6 +141,7 @@ class RAGTool:
  # Initialize configuration
  self.min_paragraph_length = int(os.environ.get("JARVIS_MIN_PARAGRAPH_LENGTH", "50")) # Minimum paragraph length
  self.max_paragraph_length = int(os.environ.get("JARVIS_MAX_PARAGRAPH_LENGTH", "1000")) # Maximum paragraph length
+ self.context_window = int(os.environ.get("JARVIS_CONTEXT_WINDOW", "5")) # Context window size, default 5 chunks on each side
 
  # Initialize the data directory
  self.data_dir = os.path.join(self.root_dir, ".jarvis-rag")
@@ -160,6 +161,7 @@ class RAGTool:
  self.cache_path = os.path.join(self.data_dir, "cache.pkl")
  self.documents: List[Document] = []
  self.index = None
+ self.max_context_length = int(os.getenv("JARVIS_MAX_CONTEXT_LENGTH", 65536))
 
  # Load the cache
  self._load_cache()
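The two hunks above introduce new environment-driven settings for RAGTool. A small sketch of how those defaults behave, using the variable names from the diff (the override values are arbitrary examples):

import os

# Defaults mirror the diff: 5 surrounding chunks, 65536-character context budget.
context_window = int(os.environ.get("JARVIS_CONTEXT_WINDOW", "5"))
max_context_length = int(os.getenv("JARVIS_MAX_CONTEXT_LENGTH", 65536))

# Hypothetical overrides, set before constructing RAGTool:
os.environ["JARVIS_CONTEXT_WINDOW"] = "2"          # fewer neighboring chunks per hit
os.environ["JARVIS_MAX_CONTEXT_LENGTH"] = "32768"  # tighter length budget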
@@ -424,11 +426,58 @@ class RAGTool:
 
  # Return results
  results = []
+ current_length = 0
+
  for idx, distance in zip(indices[0], distances[0]):
  if idx == -1: # FAISS returns -1 for invalid results
  continue
+
+ doc = self.documents[idx]
  similarity = 1.0 / (1.0 + float(distance))
- results.append((self.documents[idx], similarity))
+
+ # Get all document chunks from the same file
+ file_docs = [d for d in self.documents if d.metadata['file_path'] == doc.metadata['file_path']]
+ file_docs.sort(key=lambda x: x.metadata['chunk_index'])
+
+ # Find the index of the current chunk
+ current_idx = file_docs.index(doc)
+
+ # Try different context window sizes, from largest to smallest
+ added = False
+ for window_size in range(self.context_window, -1, -1):
+ start_idx = max(0, current_idx - window_size)
+ end_idx = min(len(file_docs), current_idx + window_size + 1)
+
+ # Merge the content, including the context
+ content_parts = []
+ content_parts.extend(file_docs[i].content for i in range(start_idx, current_idx))
+ content_parts.append(doc.content)
+ content_parts.extend(file_docs[i].content for i in range(current_idx + 1, end_idx))
+
+ merged_content = "\n".join(content_parts)
+
+ # Create the document object
+ context_doc = Document(
+ content=merged_content,
+ metadata={
+ **doc.metadata,
+ "similarity": similarity
+ }
+ )
+
+ # Compute the total length after adding this result
+ total_content_length = len(merged_content)
+
+ # Check whether it stays within the length limit
+ if current_length + total_content_length <= self.max_context_length:
+ results.append((context_doc, similarity))
+ current_length += total_content_length
+ added = True
+ break
+
+ # If the result cannot be added even without context, stop adding more results
+ if not added:
+ break
 
  return results
 
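The hunk above expands each FAISS hit with neighboring chunks from the same file, trying the widest window first and shrinking it until the merged text fits the remaining JARVIS_MAX_CONTEXT_LENGTH budget. A simplified standalone sketch of that strategy on plain strings (merge_with_context and the sample chunks are illustrative, not the package's Document-based code):

def merge_with_context(chunks, hit_idx, window, budget_left):
    # Try windows from `window` down to 0 and return the first merge that fits the budget.
    for w in range(window, -1, -1):
        start, end = max(0, hit_idx - w), min(len(chunks), hit_idx + w + 1)
        merged = "\n".join(chunks[start:end])
        if len(merged) <= budget_left:
            return merged
    return None  # even the bare chunk does not fit

chunks = ["alpha " * 10, "beta " * 10, "gamma " * 10, "delta " * 10]
print(merge_with_context(chunks, hit_idx=1, window=2, budget_left=150))  # shrinks to the bare hit chunk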
@@ -443,11 +492,8 @@ class RAGTool:
  query: the query text
 
  Returns:
- List of relevant documents
+ List of relevant documents, including context
  """
- if not self.is_index_built():
- raise ValueError("Index has not been built; call build_index() first")
-
  results = self.search(query)
  return [doc for doc, _ in results]
 
@@ -471,14 +517,13 @@ class RAGTool:
  for doc in results:
  context.append(f"""
  Source file: {doc.metadata['file_path']}
- Chunk position: {doc.metadata['chunk_index'] + 1}/{doc.metadata['total_chunks']}
  Content:
  {doc.content}
  ---
  """)
 
  # Build the prompt
- prompt = f"""Please answer the user's question based on the following document fragments. If the information in the fragments is not sufficient for a complete answer, say so explicitly.
+ prompt = f"""Please answer the user's question based on the following document fragments. If the document content is not sufficient for a complete answer, say so explicitly.
 
  User question: {question}
 
@@ -525,9 +570,6 @@ def main():
  return 0
 
  if args.search or args.ask:
- if not rag.is_index_built():
- PrettyOutput.print("The index has not been built yet; build it first with the --dir and --build options", output_type=OutputType.WARNING)
- return 1
 
  if args.search:
  results = rag.query(args.search)
jarvis/main.py CHANGED
@@ -123,7 +123,7 @@ def main():
  selected_task = select_task(tasks)
  if selected_task:
  PrettyOutput.print(f"\nRunning task: {selected_task}", OutputType.INFO)
- agent.run(selected_task, args.files, keep_history=args.keep_history)
+ agent.run(selected_task, args.files)
  return 0
 
  # If no predefined task was selected, enter interactive mode
jarvis/models/ai8.py CHANGED
@@ -11,6 +11,8 @@ class AI8Model(BasePlatform):
 
  platform_name = "ai8"
  BASE_URL = "https://ai8.rcouyi.com"
+
+ first_time = True
 
  def __init__(self):
  """Initialize model"""
@@ -23,46 +25,47 @@ class AI8Model(BasePlatform):
  # Get the list of available models
  available_models = self.get_available_models()
 
- if available_models:
- PrettyOutput.section("Supported models", OutputType.SUCCESS)
- for model in self.models.values():
- # Format the model information for display
- model_str = f"{model['value']:<30}"
-
- # Add the label
- model_str += f"{model['label']}"
-
- # Add tag and credit information
- attrs = []
- if model['attr'].get('tag'):
- attrs.append(model['attr']['tag'])
- if model['attr'].get('integral'):
- attrs.append(model['attr']['integral'])
+ if AI8Model.first_time:
+ AI8Model.first_time = False
+ if available_models:
+ PrettyOutput.section("Supported models", OutputType.SUCCESS)
+ for model in self.models.values():
+ # Format the model information for display
+ model_str = f"{model['value']:<30}"
 
- # Add feature markers
- features = []
- if model['attr'].get('multimodal'):
- features.append("multimodal")
- if model['attr'].get('plugin'):
- features.append("plugin support")
- if model['attr'].get('onlyImg'):
- features.append("image support")
- if features:
- model_str += f" [{'|'.join(features)}]"
+ # Add the label
+ model_str += f"{model['label']}"
 
- # Add the note
- if model['attr'].get('note'):
- model_str += f" - {model['attr']['note']}"
-
- PrettyOutput.print(model_str, OutputType.INFO)
- else:
- PrettyOutput.print("Failed to fetch the model list", OutputType.WARNING)
+ # Add tag and credit information
+ attrs = []
+ if model['attr'].get('tag'):
+ attrs.append(model['attr']['tag'])
+ if model['attr'].get('integral'):
+ attrs.append(model['attr']['integral'])
+
+ # Add feature markers
+ features = []
+ if model['attr'].get('multimodal'):
+ features.append("multimodal")
+ if model['attr'].get('plugin'):
+ features.append("plugin support")
+ if model['attr'].get('onlyImg'):
+ features.append("image support")
+ if features:
+ model_str += f" [{'|'.join(features)}]"
+
+ # Add the note
+ if model['attr'].get('note'):
+ model_str += f" - {model['attr']['note']}"
+
+ PrettyOutput.print(model_str, OutputType.INFO)
+ else:
+ PrettyOutput.print("Failed to fetch the model list", OutputType.WARNING)
 
  self.token = os.getenv("AI8_API_KEY")
  if not self.token:
  raise Exception("AI8_API_KEY is not set")
 
- PrettyOutput.print("Use the AI8_MODEL environment variable to configure the model", OutputType.SUCCESS)
 
  self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-chat"
  if self.model_name not in self.models:
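AI8Model now prints its model listing only while a class-level first_time flag is still set, so repeated platform instances within one process stay quiet; OyiModel further down in this diff receives the same guard. A minimal sketch of the pattern with a placeholder class:

class Platform:
    first_time = True  # class attribute, shared by all instances

    def __init__(self):
        if Platform.first_time:
            Platform.first_time = False
            print("supported models: ...")  # runs only for the first instance

Platform(); Platform()  # the listing is printed a single time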
@@ -103,7 +106,6 @@ class AI8Model(BasePlatform):
  return False
 
  self.conversation = data['data']
- PrettyOutput.print(f"Conversation created successfully: {data['data']['id']}", OutputType.SUCCESS)
 
  # 2. Update the conversation settings
  session_data = {
@@ -111,7 +113,7 @@ class AI8Model(BasePlatform):
  "model": self.model_name,
  "contextCount": 1024,
  "prompt": self.system_message,
- "plugins": ["tavily_search"],
+ "plugins": [],
  "localPlugins": None,
  "useAppId": 0
  }
@@ -126,7 +128,6 @@ class AI8Model(BasePlatform):
  data = response.json()
  if data['code'] == 0:
  self.conversation = data['data']
- PrettyOutput.print("Conversation settings updated successfully", OutputType.SUCCESS)
  return True
  else:
  PrettyOutput.print(f"Failed to update conversation settings: {data.get('msg', 'unknown error')}", OutputType.ERROR)
@@ -149,7 +150,6 @@ class AI8Model(BasePlatform):
  "name": name,
  "data": f"data:image/png;base64,{base64_data}"
  })
- PrettyOutput.print(f"File {name} is ready to send", OutputType.SUCCESS)
 
  def set_system_message(self, message: str):
  """Set system message"""
@@ -158,8 +158,6 @@ class AI8Model(BasePlatform):
  def chat(self, message: str) -> str:
  """Run a chat exchange"""
  try:
- if not self.suppress_output:
- PrettyOutput.print("Sending request...", OutputType.PROGRESS)
 
  # Make sure there is a conversation ID
  if not self.conversation:
@@ -263,7 +261,6 @@ class AI8Model(BasePlatform):
  if response.status_code == 200:
  data = response.json()
  if data['code'] == 0:
- PrettyOutput.print("Conversation deleted successfully", OutputType.SUCCESS)
  self.reset()
  return True
  else:
jarvis/models/base.py CHANGED
@@ -10,6 +10,10 @@ class BasePlatform(ABC):
  self.suppress_output = False # Add output control flag
  pass
 
+ def __del__(self):
+ """Destroy the model"""
+ self.delete_chat()
+
  def set_model_name(self, model_name: str):
  """Set the model name"""
  raise NotImplementedError("set_model_name is not implemented")
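This release removes most explicit model.delete_chat() calls (agent.py, jarvis_codebase, jarvis/tools/generator.py) and instead relies on this __del__ hook, so a platform's remote session is deleted whenever the object is garbage-collected. A minimal sketch of the mechanism with a dummy class; note that in CPython the destructor usually runs as soon as the last reference is dropped, and any exception raised inside __del__ is reported but not propagated:

class DummyPlatform:
    def delete_chat(self):
        print("session deleted")

    def __del__(self):
        # Called when the object is garbage-collected; errors raised here are ignored by the interpreter.
        self.delete_chat()

model = DummyPlatform()
del model  # in CPython the refcount hits zero and "session deleted" is printed immediately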
jarvis/models/kimi.py CHANGED
@@ -162,10 +162,8 @@ class KimiModel(BasePlatform):
  if not file_list:
  return []
 
- PrettyOutput.print("Progress: starting file upload processing...", OutputType.PROGRESS)
 
  if not self.chat_id:
- PrettyOutput.print("Creating a new chat session...", OutputType.PROGRESS)
  if not self._create_chat():
  raise Exception("Failed to create chat session")
 
@@ -178,28 +176,23 @@ class KimiModel(BasePlatform):
  action = "image" if mime_type and mime_type.startswith('image/') else "file"
 
  # Get the presigned URL
- PrettyOutput.print("Getting the upload URL...", OutputType.PROGRESS)
  presigned_data = self._get_presigned_url(file_path, action)
 
  # Upload the file
- PrettyOutput.print("Uploading file content...", OutputType.PROGRESS)
  if self._upload_file(file_path, presigned_data["url"]):
  # Get the file information
- PrettyOutput.print("Getting file information...", OutputType.PROGRESS)
+ file_info = self._get_file_info(presigned_data, os.path.basename(file_path), action)
  file_info = self._get_file_info(presigned_data, os.path.basename(file_path), action)
  # Wait for the file to be parsed
- PrettyOutput.print("Waiting for file parsing to finish...", OutputType.PROGRESS)
 
  # Only files need to be parsed
  if action == "file":
  if self._wait_for_parse(file_info["id"]):
  uploaded_files.append(file_info)
- PrettyOutput.print(f"Success: file processed successfully: {file_path}", OutputType.SUCCESS)
  else:
  PrettyOutput.print(f"✗ File parsing failed: {file_path}", OutputType.ERROR)
  else:
  uploaded_files.append(file_info)
- PrettyOutput.print(f"Success: file processed successfully: {file_path}", OutputType.SUCCESS)
  else:
  PrettyOutput.print(f"Error: file upload failed: {file_path}", OutputType.ERROR)
 
@@ -207,19 +200,12 @@ class KimiModel(BasePlatform):
  PrettyOutput.print(f"✗ Error while processing file {file_path}: {str(e)}", OutputType.ERROR)
  continue
 
- if uploaded_files:
- PrettyOutput.print(f"Successfully processed {len(uploaded_files)}/{len(file_list)} files", OutputType.SUCCESS)
- else:
- PrettyOutput.print("No files were processed successfully", OutputType.ERROR)
-
  self.uploaded_files = uploaded_files
  return uploaded_files
 
  def chat(self, message: str) -> str:
  """Send a message and get the response"""
  if not self.chat_id:
- if not self.suppress_output:
- PrettyOutput.print("Creating a new chat session...", OutputType.PROGRESS)
  if not self._create_chat():
  raise Exception("Failed to create chat session")
 
@@ -230,15 +216,11 @@ class KimiModel(BasePlatform):
  refs_file = []
  if self.first_chat:
  if self.uploaded_files:
- if not self.suppress_output:
- PrettyOutput.print(f"First chat turn, referencing {len(self.uploaded_files)} files...", OutputType.PROGRESS)
  refs = [f["id"] for f in self.uploaded_files]
  refs_file = self.uploaded_files
  message = self.system_message + "\n" + message
  self.first_chat = False
 
- if not self.suppress_output:
- PrettyOutput.print("Sending request...", OutputType.PROGRESS)
  payload = {
  "messages": [{"role": "user", "content": message}],
  "use_search": True,
@@ -263,9 +245,6 @@ class KimiModel(BasePlatform):
  search_results = []
  ref_sources = []
 
- if not self.suppress_output:
- PrettyOutput.print("Receiving response...", OutputType.PROGRESS)
-
  for line in response.iter_lines():
  if not line:
  continue
@@ -382,7 +361,6 @@ class KimiModel(BasePlatform):
  try:
  response = while_success(lambda: requests.delete(url, headers=headers), sleep_time=5)
  if response.status_code == 200:
- PrettyOutput.print("Conversation deleted", OutputType.SUCCESS)
  self.reset()
  return True
  else:
jarvis/models/openai.py CHANGED
@@ -54,8 +54,6 @@ class OpenAIModel(BasePlatform):
  def chat(self, message: str) -> str:
  """Run a chat exchange"""
  try:
- if not self.suppress_output:
- PrettyOutput.print("Sending request...", OutputType.PROGRESS)
 
  # Add the user message to the history
  self.messages.append({"role": "user", "content": message})
@@ -66,8 +64,6 @@ class OpenAIModel(BasePlatform):
  stream=True
  )
 
- if not self.suppress_output:
- PrettyOutput.print("Receiving response...", OutputType.PROGRESS)
  full_response = ""
 
  for chunk in response:
jarvis/models/oyi.py CHANGED
@@ -11,6 +11,8 @@ class OyiModel(BasePlatform):
 
  platform_name = "oyi"
  BASE_URL = "https://api-10086.rcouyi.com"
+
+ first_time = True
 
  def __init__(self):
  """Initialize model"""
@@ -19,11 +21,14 @@ class OyiModel(BasePlatform):
 
  # Get the list of available models
  available_models = self.get_available_models()
- if available_models:
- for model in available_models:
- PrettyOutput.print(model, OutputType.INFO)
- else:
- PrettyOutput.print("Failed to fetch the model list", OutputType.WARNING)
+
+ if OyiModel.first_time:
+ OyiModel.first_time = False
+ if available_models:
+ for model in available_models:
+ PrettyOutput.print(model, OutputType.INFO)
+ else:
+ PrettyOutput.print("Failed to fetch the model list", OutputType.WARNING)
 
  self.messages = []
  self.system_message = ""
@@ -85,7 +90,6 @@ class OyiModel(BasePlatform):
  data = response.json()
  if data['code'] == 200 and data['type'] == 'success':
  self.conversation = data
- PrettyOutput.print(f"Conversation created successfully: {data['result']['id']}", OutputType.SUCCESS)
  return True
  else:
  PrettyOutput.print(f"Failed to create conversation: {data['message']}", OutputType.ERROR)
@@ -112,9 +116,6 @@ class OyiModel(BasePlatform):
  str: Model response
  """
  try:
- if not self.suppress_output:
- PrettyOutput.print("Sending request...", OutputType.PROGRESS)
-
  # Make sure there is a conversation ID
  if not self.conversation:
  if not self.create_conversation():
@@ -184,7 +185,6 @@ class OyiModel(BasePlatform):
 
  if response.status_code == 200:
  if not self.suppress_output:
- PrettyOutput.print("Receiving response...", OutputType.PROGRESS)
  PrettyOutput.print(response.text, OutputType.SYSTEM)
  self.messages.append({"role": "assistant", "content": response.text})
  return response.text
@@ -232,7 +232,6 @@ class OyiModel(BasePlatform):
  if response.status_code == 200:
  data = response.json()
  if data['code'] == 200 and data['type'] == 'success':
- PrettyOutput.print("Conversation deleted successfully", OutputType.SUCCESS)
  self.reset()
  return True
  else:
@@ -294,7 +293,6 @@ class OyiModel(BasePlatform):
  if response.status_code == 200:
  data = response.json()
  if data.get('code') == 200:
- PrettyOutput.print("File uploaded successfully", OutputType.SUCCESS)
  self.upload_files.append(data)
  return data
  else:
jarvis/tools/generator.py CHANGED
@@ -102,7 +102,6 @@ class ExampleTool:
 
  # Call the model to generate code
  response = model.chat(prompt)
- model.delete_chat()
 
  # Extract the code block
  code_start = response.find("```python")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: jarvis-ai-assistant
- Version: 0.1.81
+ Version: 0.1.83
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
  Home-page: https://github.com/skyfireitdiy/Jarvis
  Author: skyfire
@@ -1,21 +1,21 @@
- jarvis/__init__.py,sha256=B8CWgNiQrlCsbghyy-YN5DH1HNMJhUoKmyegydchC2w,50
- jarvis/agent.py,sha256=9cWBHjxrlqnI1GV3wB4wPvZUwK7d63ysyX40zqapNRc,19373
- jarvis/main.py,sha256=OJc7e5i988eQLByT7SzX7eoa0HKm4LMg814gZv9hBX8,5487
+ jarvis/__init__.py,sha256=S9oTgBnIxuAWrIt79cFytfBwF-uR1VoqSb-FgvZCcF0,50
+ jarvis/agent.py,sha256=2g2fl9BFosi4YasIEUgutS1pX6AMTf2wit5V0bZILMU,18944
+ jarvis/main.py,sha256=ksZkJzqc4oow6wB-7QbGJLejGblrbZtRI3fdciS5DS4,5455
  jarvis/utils.py,sha256=jvo6ylvrTaSmXWcYY0qTTf14TwCkAhPsCUuIl5WHEuw,8640
  jarvis/jarvis_codebase/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- jarvis/jarvis_codebase/main.py,sha256=z6HRTkUjAhRQclfHqj4yt3wq_wxiRlp0OCJsL_99NfI,24884
+ jarvis/jarvis_codebase/main.py,sha256=rlcN44mm67c-ZlCtcsq52jmqWlf4wPxvc-_HbgmoXOk,24510
  jarvis/jarvis_coder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- jarvis/jarvis_coder/main.py,sha256=WZCjtlPDEvg2y6F6e4djc4xYWXM3eB_0pHrPpvlZEC4,23890
+ jarvis/jarvis_coder/main.py,sha256=TxtFCzA5SJSorHtHX5_V3qQeJsoFMgVdrwxLJ9GnPw8,23619
  jarvis/jarvis_rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- jarvis/jarvis_rag/main.py,sha256=drCH9KmwQozxtPICIfTFXmkIhwN8WDreMOCTxtj9SPc,20842
+ jarvis/jarvis_rag/main.py,sha256=6G5rpcedxzALanGtXdhBxKR_uG4Sjiei8qolI-BuDp4,22682
  jarvis/jarvis_smart_shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jarvis/jarvis_smart_shell/main.py,sha256=QgR1CZRcTVfC8a5hMso3onH3pFdDoniRjr0YQvY2jXQ,3809
  jarvis/models/__init__.py,sha256=mrOt67nselz_H1gX9wdAO4y2DY5WPXzABqJbr5Des8k,63
- jarvis/models/ai8.py,sha256=vgy-r_3HHxGMAalZrA65VWHC1PuwBTYgtprSgHkCbrk,12557
- jarvis/models/base.py,sha256=ShV1H8Unee4RMaiFO4idROQA0Hc6wu4dyeRPX5fcszk,1433
- jarvis/models/kimi.py,sha256=1iTB0Z_WOmCML3Ufsge6jmeKOYvccr7I5lS3JUXymU4,17611
- jarvis/models/openai.py,sha256=ayaBWAN5VexMcKVrjEPDNB-Q9wx0sCV9Z4BCrvwYJ9w,4315
- jarvis/models/oyi.py,sha256=X2c5SWDIuQDCCFBcEKbzIWEz3I34eOAi0d1XAFgxlpw,15001
+ jarvis/models/ai8.py,sha256=P_p1ceyjn_olFk38WsubHGavWI9bMEBzo-1vo97HLPE,12215
+ jarvis/models/base.py,sha256=vQmgr-l0fRzVTUX4orbQZIKsXEInagjFdYv1cL9Hp7U,1511
+ jarvis/models/kimi.py,sha256=ltYoHQDn9vfZyhZ25eUVKMBpxKKlqlw4kManozVF7uo,16135
+ jarvis/models/openai.py,sha256=XiZ0omZdaOfDVdmXX-IA-Dg78R8xIH429L6j0R_yHSI,4083
+ jarvis/models/oyi.py,sha256=tdsBf3gegrEDkZf2VOJF9SRLwe9s2g8Al9s-qR9YQRg,14633
  jarvis/models/registry.py,sha256=Lt8IdVBAEx_CCFtfZJPgw3nxSEjfFcqI47I-U64kIbg,8257
  jarvis/tools/__init__.py,sha256=7Rqyj5hBAv5cWDVr5T9ZTZASO7ssBHeQNm2_4ZARdkA,72
  jarvis/tools/ask_user.py,sha256=xLXkYK3_f8EJ7kudA8MumOOCxyFl6z3DBS_zcscMH6Y,2151
@@ -24,16 +24,16 @@ jarvis/tools/chdir.py,sha256=TjfPbX8yvNKgUNJEMXh3ZlVDEIse_Fo8xMoVsiK7_dA,2688
  jarvis/tools/codebase_qa.py,sha256=LsowsgL7HBmdBwa7zXcYi_OkwOok4qbnzYWYsuZxHtU,2413
  jarvis/tools/coder.py,sha256=kmotT2Klsug44S51QoSW9DzkxLzcF-XonyYAEoWZV6c,2295
  jarvis/tools/file_ops.py,sha256=h8g0eT9UvlJf4kt0DLXvdSsjcPj7x19lxWdDApeDfpg,3842
- jarvis/tools/generator.py,sha256=S1DhHBfhNvF6SrORnlk8Mz210yDiJPuxbfswbX_UACs,5791
+ jarvis/tools/generator.py,sha256=TB1zcw_JmRL2W9w6L4IxtrLF3gjnNw5Jj2Zrowj0eSg,5763
  jarvis/tools/methodology.py,sha256=UG6s5VYRcd9wrKX4cg6f7zJhet5AIcthFGMOAdevBiw,5175
  jarvis/tools/registry.py,sha256=sgj5EVbRgfHSzXW5v-kbIZS_8cwAxTjHvSPAicxBRf4,9074
  jarvis/tools/search.py,sha256=c9dXtyICdl8Lm8shNPNyIx9k67uY0rMF8xnIKu2RsnE,8787
  jarvis/tools/shell.py,sha256=UPKshPyOaUwTngresUw-ot1jHjQIb4wCY5nkJqa38lU,2520
  jarvis/tools/sub_agent.py,sha256=rEtAmSVY2ZjFOZEKr5m5wpACOQIiM9Zr_3dT92FhXYU,2621
  jarvis/tools/webpage.py,sha256=d3w3Jcjcu1ESciezTkz3n3Zf-rp_l91PrVoDEZnckOo,2391
- jarvis_ai_assistant-0.1.81.dist-info/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
- jarvis_ai_assistant-0.1.81.dist-info/METADATA,sha256=kVZXWoV6abWKNXWGuVA5Q0jodwrVDIgATc2jJEVhq6o,12736
- jarvis_ai_assistant-0.1.81.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- jarvis_ai_assistant-0.1.81.dist-info/entry_points.txt,sha256=sdmIO86MrIUepJTGyHs0i_Ho9VGf1q9YRP4RgQvGWcI,280
- jarvis_ai_assistant-0.1.81.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
- jarvis_ai_assistant-0.1.81.dist-info/RECORD,,
+ jarvis_ai_assistant-0.1.83.dist-info/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
+ jarvis_ai_assistant-0.1.83.dist-info/METADATA,sha256=7r6J3Z_AjmLRXsQjyIVtJ1F4DnmepO_ES8PLFIE_BLY,12736
+ jarvis_ai_assistant-0.1.83.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ jarvis_ai_assistant-0.1.83.dist-info/entry_points.txt,sha256=sdmIO86MrIUepJTGyHs0i_Ho9VGf1q9YRP4RgQvGWcI,280
+ jarvis_ai_assistant-0.1.83.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
+ jarvis_ai_assistant-0.1.83.dist-info/RECORD,,