jarvis-ai-assistant 0.1.82.tar.gz → 0.1.84.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jarvis-ai-assistant might be problematic.

Files changed (46)
  1. {jarvis_ai_assistant-0.1.82/src/jarvis_ai_assistant.egg-info → jarvis_ai_assistant-0.1.84}/PKG-INFO +1 -1
  2. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/pyproject.toml +1 -1
  3. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/setup.py +1 -1
  4. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/__init__.py +1 -1
  5. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/agent.py +1 -10
  6. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_codebase/main.py +53 -55
  7. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_coder/main.py +2 -9
  8. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_rag/main.py +52 -10
  9. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/main.py +1 -1
  10. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/models/ai8.py +37 -40
  11. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/models/base.py +4 -0
  12. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/models/kimi.py +1 -23
  13. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/models/openai.py +0 -4
  14. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/models/oyi.py +10 -12
  15. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/generator.py +0 -1
  16. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84/src/jarvis_ai_assistant.egg-info}/PKG-INFO +1 -1
  17. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/LICENSE +0 -0
  18. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/MANIFEST.in +0 -0
  19. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/README.md +0 -0
  20. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/setup.cfg +0 -0
  21. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_codebase/__init__.py +0 -0
  22. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_coder/__init__.py +0 -0
  23. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_rag/__init__.py +0 -0
  24. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_smart_shell/__init__.py +0 -0
  25. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/jarvis_smart_shell/main.py +0 -0
  26. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/models/__init__.py +0 -0
  27. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/models/registry.py +0 -0
  28. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/__init__.py +0 -0
  29. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/ask_user.py +0 -0
  30. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/base.py +0 -0
  31. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/chdir.py +0 -0
  32. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/codebase_qa.py +0 -0
  33. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/coder.py +0 -0
  34. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/file_ops.py +0 -0
  35. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/methodology.py +0 -0
  36. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/registry.py +0 -0
  37. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/search.py +0 -0
  38. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/shell.py +0 -0
  39. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/sub_agent.py +0 -0
  40. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/tools/webpage.py +0 -0
  41. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis/utils.py +0 -0
  42. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis_ai_assistant.egg-info/SOURCES.txt +0 -0
  43. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis_ai_assistant.egg-info/dependency_links.txt +0 -0
  44. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis_ai_assistant.egg-info/entry_points.txt +0 -0
  45. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis_ai_assistant.egg-info/requires.txt +0 -0
  46. {jarvis_ai_assistant-0.1.82 → jarvis_ai_assistant-0.1.84}/src/jarvis_ai_assistant.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: jarvis-ai-assistant
- Version: 0.1.82
+ Version: 0.1.84
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
  Home-page: https://github.com/skyfireitdiy/Jarvis
  Author: skyfire
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "jarvis-ai-assistant"
- version = "0.1.82"
+ version = "0.1.84"
  description = "Jarvis: An AI assistant that uses tools to interact with the system"
  readme = "README.md"
  authors = [{ name = "Your Name", email = "your.email@example.com" }]
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

  setup(
  name="jarvis-ai-assistant",
- version="0.1.82",
+ version="0.1.84",
  author="skyfire",
  author_email="skyfireitdiy@hotmail.com",
  description="An AI assistant that uses various tools to interact with the system",
@@ -1,3 +1,3 @@
  """Jarvis AI Assistant"""

- __version__ = "0.1.82"
+ __version__ = "0.1.84"
@@ -224,7 +224,6 @@ class Agent:
  summary = self.model.chat(self.prompt + "\n" + prompt)

  # 清空当前对话历史,但保留系统消息
- self.model.delete_chat()
  self.conversation_length = 0 # 重置对话长度

  # 添加总结作为新的上下文
@@ -293,13 +292,12 @@ class Agent:
  return self._call_model(self.prompt)


- def run(self, user_input: str, file_list: Optional[List[str]] = None, keep_history: bool = False) -> str:
+ def run(self, user_input: str, file_list: Optional[List[str]] = None) -> str:
  """处理用户输入并返回响应,返回任务总结报告

  Args:
  user_input: 用户输入的任务描述
  file_list: 可选的文件列表,默认为None
- keep_history: 是否保留对话历史,默认为False

  Returns:
  str: 任务总结报告
@@ -440,13 +438,6 @@ arguments:
  PrettyOutput.print(str(e), OutputType.ERROR)
  return f"Task failed: {str(e)}"

- finally:
- # 只在不保留历史时删除会话
- if not keep_history:
- try:
- self.model.delete_chat()
- except Exception as e:
- PrettyOutput.print(f"清理会话时发生错误: {str(e)}", OutputType.ERROR)

  def clear_history(self):
  """清除对话历史,只保留系统提示"""
@@ -18,7 +18,7 @@ class CodeBase:
  load_env_from_file()
  self.root_dir = root_dir
  os.chdir(self.root_dir)
- self.thread_count = int(os.environ.get("JARVIS_THREAD_COUNT") or 10)
+ self.thread_count = int(os.environ.get("JARVIS_THREAD_COUNT") or 1)
  self.max_context_length = int(os.getenv("JARVIS_MAX_CONTEXT_LENGTH", 65536))

  # 初始化数据目录
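
The default indexing worker count drops from 10 to 1 here; both settings remain overridable through environment variables. A minimal standalone sketch of how this configuration pattern behaves (illustrative only, not the package's code):

    import os

    # `os.environ.get(...) or 1` falls back to 1 when the variable is unset *or* empty,
    # while os.getenv(name, default) only falls back when the variable is unset.
    thread_count = int(os.environ.get("JARVIS_THREAD_COUNT") or 1)
    max_context_length = int(os.getenv("JARVIS_MAX_CONTEXT_LENGTH", 65536))

    print(thread_count, max_context_length)
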
@@ -86,10 +86,9 @@ class CodeBase:
  except UnicodeDecodeError:
  return False

- def make_description(self, file_path: str) -> str:
+ def make_description(self, file_path: str, content: str) -> str:
  model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
  model.set_suppress_output(True)
- content = open(file_path, "r", encoding="utf-8").read()
  prompt = f"""请分析以下代码文件,并生成一个详细的描述。描述应该包含以下要点:

  1. 主要功能和用途
@@ -237,14 +236,22 @@ class CodeBase:
  if not self.is_text_file(file_path):
  return None

- md5 = hashlib.md5(open(file_path, "rb").read()).hexdigest()
+ # 读取文件内容,限制长度
+ with open(file_path, "r", encoding="utf-8") as f:
+ content = f.read()
+ if len(content) > self.max_context_length:
+ PrettyOutput.print(f"文件 {file_path} 内容超出长度限制,将截取前 {self.max_context_length} 个字符",
+ output_type=OutputType.WARNING)
+ content = content[:self.max_context_length]
+
+ md5 = hashlib.md5(content.encode('utf-8')).hexdigest()

  # 检查文件是否已经处理过且内容未变
  if file_path in self.vector_cache:
  if self.vector_cache[file_path].get("md5") == md5:
  return None

- description = self.make_description(file_path)
+ description = self.make_description(file_path, content) # 传入截取后的内容
  vector = self.vectorize_file(file_path, description)

  # 保存到缓存,使用实际文件路径作为键
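
With this hunk, the cache key is the MD5 of the decoded (and possibly truncated) text rather than of the raw file bytes, so a file is re-described only when its indexed portion changes. A minimal standalone sketch of the read-truncate-hash step, assuming a UTF-8 text file (hypothetical helper name):

    import hashlib

    MAX_CONTEXT_LENGTH = 65536  # mirrors the JARVIS_MAX_CONTEXT_LENGTH default used above

    def truncated_content_md5(file_path: str, limit: int = MAX_CONTEXT_LENGTH):
        """Read a text file, cap it at `limit` characters, and hash the capped text."""
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        if len(content) > limit:
            content = content[:limit]  # only the indexed portion participates in the hash
        return content, hashlib.md5(content.encode("utf-8")).hexdigest()
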
@@ -395,12 +402,11 @@ class CodeBase:
  if not initial_results:
  return []

- model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
- model.set_suppress_output(True)
+ model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
+ # model.set_suppress_output(True)

- try:
- # 构建重排序的prompt
- prompt = f"""请根据用户的查询,对以下代码文件进行相关性排序。对每个文件给出0-100的相关性分数,分数越高表示越相关。
+ # 构建重排序的prompt
+ prompt = f"""请根据用户的查询,对以下代码文件进行相关性排序。对每个文件给出0-100的相关性分数,分数越高表示越相关。
  只需要输出每个文件的分数,格式为:
  <RERANK_START>
  文件路径: 分数
@@ -411,62 +417,57 @@ class CodeBase:

  待评估文件:
  """
- for path, _, desc in initial_results:
- prompt += f"""
+ for path, _, desc in initial_results:
+ prompt += f"""
  文件: {path}
  描述: {desc}
  ---
  """
-
- response = model.chat(prompt)
-
- # 提取<RERANK_START>和<RERANK_END>之间的内容
- start_tag = "<RERANK_START>"
- end_tag = "<RERANK_END>"
- if start_tag in response and end_tag in response:
- response = response[response.find(start_tag) + len(start_tag):response.find(end_tag)]
-
- # 解析响应,提取文件路径和分数
- scored_results = []
- for line in response.split('\n'):
- if ':' not in line:
- continue
- try:
- file_path, score_str = line.split(':', 1)
- file_path = file_path.strip()
- score = float(score_str.strip()) / 100.0 # 转换为0-1范围
- # 只保留相关度大于等于0.7的结果
- if score >= 0.7:
- # 找到对应的原始描述
- desc = next((desc for p, _, desc in initial_results if p == file_path), "")
- scored_results.append((file_path, score, desc))
- except:
- continue
-
- # 按分数降序排序
- return sorted(scored_results, key=lambda x: x[1], reverse=True)
-
- finally:
- model.delete_chat()

- return initial_results
+ response = model.chat(prompt)
+
+ # 提取<RERANK_START>和<RERANK_END>之间的内容
+ start_tag = "<RERANK_START>"
+ end_tag = "<RERANK_END>"
+ if start_tag in response and end_tag in response:
+ response = response[response.find(start_tag) + len(start_tag):response.find(end_tag)]
+
+ # 解析响应,提取文件路径和分数
+ scored_results = []
+ for line in response.split('\n'):
+ if ':' not in line:
+ continue
+ try:
+ file_path, score_str = line.split(':', 1)
+ file_path = file_path.strip()
+ score = float(score_str.strip()) / 100.0 # 转换为0-1范围
+ # 只保留相关度大于等于0.7的结果
+ if score >= 0.7:
+ # 找到对应的原始描述
+ desc = next((desc for p, _, desc in initial_results if p == file_path), "")
+ scored_results.append((file_path, score, desc))
+ except:
+ continue
+
+ # 按分数降序排序
+ return sorted(scored_results, key=lambda x: x[1], reverse=True)
+

  def search_similar(self, query: str, top_k: int = 30) -> List[Tuple[str, float, str]]:
  """搜索相似文件"""
  model = PlatformRegistry.get_global_platform_registry().get_normal_platform()
  model.set_suppress_output(True)

- try:
- prompt = f"""请根据以下查询,生成意思完全相同的另一个表述。这个表述将用于代码搜索,所以要保持专业性和准确性。
+
+ prompt = f"""请根据以下查询,生成意思完全相同的另一个表述。这个表述将用于代码搜索,所以要保持专业性和准确性。
  原始查询: {query}

  请直接输出新表述,不要有编号或其他标记。
  """
+
+ query = model.chat(prompt)

- query = model.chat(prompt)
-
- finally:
- model.delete_chat()
+

  PrettyOutput.print(f"查询: {query}", output_type=OutputType.INFO)

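The reranking logic above now runs without the try/finally cleanup, and still expects the model to answer with "文件路径: 分数" lines between the <RERANK_START> and <RERANK_END> markers, keeping only scores of 0.7 and above. A minimal standalone sketch of that parsing step (hypothetical helper, same response format assumed):

    from typing import List, Tuple

    def parse_rerank_response(response: str, threshold: float = 0.7) -> List[Tuple[str, float]]:
        """Extract 'path: score' lines between the RERANK markers and keep strong matches."""
        start_tag, end_tag = "<RERANK_START>", "<RERANK_END>"
        if start_tag in response and end_tag in response:
            response = response[response.find(start_tag) + len(start_tag):response.find(end_tag)]
        scored = []
        for line in response.splitlines():
            if ":" not in line:
                continue
            path, score_str = line.split(":", 1)
            try:
                score = float(score_str.strip()) / 100.0  # model scores 0-100; normalize to 0-1
            except ValueError:
                continue
            if score >= threshold:
                scored.append((path.strip(), score))
        return sorted(scored, key=lambda x: x[1], reverse=True)

    # parse_rerank_response("<RERANK_START>\nsrc/a.py: 92\nsrc/b.py: 40\n<RERANK_END>")
    # -> [("src/a.py", 0.92)]
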
@@ -537,11 +538,8 @@ class CodeBase:
  请用专业的语言回答用户的问题,如果给出的文件内容不足以回答用户的问题,请告诉用户,绝对不要胡编乱造。
  """
  model = PlatformRegistry.get_global_platform_registry().get_codegen_platform()
- try:
- response = model.chat(prompt)
- return response
- finally:
- model.delete_chat()
+ response = model.chat(prompt)
+ return response

  def is_index_generated(self) -> bool:
  """检查索引是否已经生成"""
@@ -68,10 +68,7 @@ class JarvisCoder:

  def _new_model(self):
  """获取大模型"""
- model = PlatformRegistry().get_global_platform_registry().create_platform(self.platform)
- if self.model:
- model_name = self.model
- model.set_model_name(model_name)
+ model = PlatformRegistry().get_global_platform_registry().get_codegen_platform()
  return model

  def _has_uncommitted_files(self) -> bool:
@@ -407,8 +404,7 @@ class JarvisCoder:
  """

  # 使用normal模型生成commit信息
- model = PlatformRegistry().get_global_platform_registry().create_platform(self.platform)
- model.set_model_name(self.model)
+ model = PlatformRegistry().get_global_platform_registry().get_codegen_platform()
  model.set_suppress_output(True)
  success, response = self._call_model_with_retry(model, prompt)
  if not success:
@@ -486,9 +482,6 @@ def main():
  parser.add_argument('-l', '--language', help='编程语言', default="python")
  args = parser.parse_args()

- # 设置平台
- if not args.platform:
- print("错误: 未指定AI平台,请使用 -p 参数")

  tool = JarvisCoder(args.dir, args.language)

@@ -141,6 +141,7 @@ class RAGTool:
  # 初始化配置
  self.min_paragraph_length = int(os.environ.get("JARVIS_MIN_PARAGRAPH_LENGTH", "50")) # 最小段落长度
  self.max_paragraph_length = int(os.environ.get("JARVIS_MAX_PARAGRAPH_LENGTH", "1000")) # 最大段落长度
+ self.context_window = int(os.environ.get("JARVIS_CONTEXT_WINDOW", "5")) # 上下文窗口大小,默认前后各5个片段

  # 初始化数据目录
  self.data_dir = os.path.join(self.root_dir, ".jarvis-rag")
@@ -160,6 +161,7 @@ class RAGTool:
  self.cache_path = os.path.join(self.data_dir, "cache.pkl")
  self.documents: List[Document] = []
  self.index = None
+ self.max_context_length = int(os.getenv("JARVIS_MAX_CONTEXT_LENGTH", 65536))

  # 加载缓存
  self._load_cache()
@@ -424,11 +426,58 @@ class RAGTool:

  # 返回结果
  results = []
+ current_length = 0
+
  for idx, distance in zip(indices[0], distances[0]):
  if idx == -1: # FAISS返回-1表示无效结果
  continue
+
+ doc = self.documents[idx]
  similarity = 1.0 / (1.0 + float(distance))
- results.append((self.documents[idx], similarity))
+
+ # 获取同一文件中的所有文档片段
+ file_docs = [d for d in self.documents if d.metadata['file_path'] == doc.metadata['file_path']]
+ file_docs.sort(key=lambda x: x.metadata['chunk_index'])
+
+ # 找到当前片段的索引
+ current_idx = file_docs.index(doc)
+
+ # 尝试不同的上下文窗口大小,从最大到最小
+ added = False
+ for window_size in range(self.context_window, -1, -1):
+ start_idx = max(0, current_idx - window_size)
+ end_idx = min(len(file_docs), current_idx + window_size + 1)
+
+ # 合并内容,包含上下文
+ content_parts = []
+ content_parts.extend(file_docs[i].content for i in range(start_idx, current_idx))
+ content_parts.append(doc.content)
+ content_parts.extend(file_docs[i].content for i in range(current_idx + 1, end_idx))
+
+ merged_content = "\n".join(content_parts)
+
+ # 创建文档对象
+ context_doc = Document(
+ content=merged_content,
+ metadata={
+ **doc.metadata,
+ "similarity": similarity
+ }
+ )
+
+ # 计算添加这个结果后的总长度
+ total_content_length = len(merged_content)
+
+ # 检查是否在长度限制内
+ if current_length + total_content_length <= self.max_context_length:
+ results.append((context_doc, similarity))
+ current_length += total_content_length
+ added = True
+ break
+
+ # 如果即使没有上下文也无法添加,就停止添加更多结果
+ if not added:
+ break

  return results

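Each FAISS hit is now expanded with neighboring chunks from the same file, and the window shrinks until the merged text fits the remaining max_context_length budget. A minimal standalone sketch of that shrinking-window merge over plain strings (hypothetical helper, chunks assumed to be in file order):

    from typing import List, Optional

    def merge_with_context(chunks: List[str], hit_idx: int, max_window: int,
                           budget_left: int) -> Optional[str]:
        """Join the hit chunk with up to `max_window` neighbors per side,
        shrinking the window until the merged text fits the remaining budget."""
        for window in range(max_window, -1, -1):
            start = max(0, hit_idx - window)
            end = min(len(chunks), hit_idx + window + 1)
            merged = "\n".join(chunks[start:end])
            if len(merged) <= budget_left:
                return merged  # largest window that still fits
        return None  # even the bare chunk exceeds the remaining budget

    # merge_with_context(["aaaa", "bbbb", "cccc", "dddd", "eeee"], hit_idx=2,
    #                    max_window=2, budget_left=40) -> all five chunks joined
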
@@ -443,11 +492,8 @@ class RAGTool:
  query: 查询文本

  Returns:
- 相关文档列表
+ 相关文档列表,包含上下文
  """
- if not self.is_index_built():
- raise ValueError("索引未构建,请先调用build_index()")
-
  results = self.search(query)
  return [doc for doc, _ in results]

@@ -471,14 +517,13 @@ class RAGTool:
  for doc in results:
  context.append(f"""
  来源文件: {doc.metadata['file_path']}
- 片段位置: {doc.metadata['chunk_index'] + 1}/{doc.metadata['total_chunks']}
  内容:
  {doc.content}
  ---
  """)

  # 构建提示词
- prompt = f"""请基于以下文档片段回答用户的问题。如果文档片段中的信息不足以完整回答问题,请明确指出。
+ prompt = f"""请基于以下文档片段回答用户的问题。如果文档内容不足以完整回答问题,请明确指出。

  用户问题: {question}

@@ -525,9 +570,6 @@ def main():
  return 0

  if args.search or args.ask:
- if not rag.is_index_built():
- PrettyOutput.print("索引尚未构建,请先使用 --dir 和 --build 参数构建索引", output_type=OutputType.WARNING)
- return 1

  if args.search:
  results = rag.query(args.search)
@@ -123,7 +123,7 @@ def main():
  selected_task = select_task(tasks)
  if selected_task:
  PrettyOutput.print(f"\n执行任务: {selected_task}", OutputType.INFO)
- agent.run(selected_task, args.files, keep_history=args.keep_history)
+ agent.run(selected_task, args.files)
  return 0

  # 如果没有选择预定义任务,进入交互模式
@@ -11,6 +11,8 @@ class AI8Model(BasePlatform):

  platform_name = "ai8"
  BASE_URL = "https://ai8.rcouyi.com"
+
+ first_time = True

  def __init__(self):
  """Initialize model"""
@@ -23,46 +25,47 @@ class AI8Model(BasePlatform):
  # 获取可用模型列表
  available_models = self.get_available_models()

- if available_models:
- PrettyOutput.section("支持的模型", OutputType.SUCCESS)
- for model in self.models.values():
- # 格式化显示模型信息
- model_str = f"{model['value']:<30}"
-
- # 添加标签
- model_str += f"{model['label']}"
-
- # 添加标签和积分信息
- attrs = []
- if model['attr'].get('tag'):
- attrs.append(model['attr']['tag'])
- if model['attr'].get('integral'):
- attrs.append(model['attr']['integral'])
+ if AI8Model.first_time:
+ AI8Model.first_time = False
+ if available_models:
+ PrettyOutput.section("支持的模型", OutputType.SUCCESS)
+ for model in self.models.values():
+ # 格式化显示模型信息
+ model_str = f"{model['value']:<30}"

- # 添加特性标记
- features = []
- if model['attr'].get('multimodal'):
- features.append("多模态")
- if model['attr'].get('plugin'):
- features.append("插件支持")
- if model['attr'].get('onlyImg'):
- features.append("图像支持")
- if features:
- model_str += f" [{'|'.join(features)}]"
+ # 添加标签
+ model_str += f"{model['label']}"

- # 添加备注
- if model['attr'].get('note'):
- model_str += f" - {model['attr']['note']}"
-
- PrettyOutput.print(model_str, OutputType.INFO)
- else:
- PrettyOutput.print("获取模型列表失败", OutputType.WARNING)
+ # 添加标签和积分信息
+ attrs = []
+ if model['attr'].get('tag'):
+ attrs.append(model['attr']['tag'])
+ if model['attr'].get('integral'):
+ attrs.append(model['attr']['integral'])
+
+ # 添加特性标记
+ features = []
+ if model['attr'].get('multimodal'):
+ features.append("多模态")
+ if model['attr'].get('plugin'):
+ features.append("插件支持")
+ if model['attr'].get('onlyImg'):
+ features.append("图像支持")
+ if features:
+ model_str += f" [{'|'.join(features)}]"
+
+ # 添加备注
+ if model['attr'].get('note'):
+ model_str += f" - {model['attr']['note']}"
+
+ PrettyOutput.print(model_str, OutputType.INFO)
+ else:
+ PrettyOutput.print("获取模型列表失败", OutputType.WARNING)

  self.token = os.getenv("AI8_API_KEY")
  if not self.token:
  raise Exception("AI8_API_KEY is not set")

- PrettyOutput.print("使用AI8_MODEL环境变量配置模型", OutputType.SUCCESS)

  self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-chat"
  if self.model_name not in self.models:
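
The model listing is now printed only once per process: first_time is a class attribute, so the first AI8Model instance flips it and later instances stay quiet (OyiModel gets the same treatment further down). A minimal standalone sketch of the pattern (hypothetical class, not the package's code):

    class Platform:
        # Class-level flag shared by every instance; the first __init__ flips it.
        first_time = True

        def __init__(self):
            if Platform.first_time:
                Platform.first_time = False
                print("supported models: ...")  # shown once per process

    first = Platform()   # prints the listing
    second = Platform()  # silent
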
@@ -103,7 +106,6 @@ class AI8Model(BasePlatform):
  return False

  self.conversation = data['data']
- PrettyOutput.print(f"创建会话成功: {data['data']['id']}", OutputType.SUCCESS)

  # 2. 更新会话设置
  session_data = {
@@ -111,7 +113,7 @@ class AI8Model(BasePlatform):
  "model": self.model_name,
  "contextCount": 1024,
  "prompt": self.system_message,
- "plugins": ["tavily_search"],
+ "plugins": [],
  "localPlugins": None,
  "useAppId": 0
  }
@@ -126,7 +128,6 @@ class AI8Model(BasePlatform):
  data = response.json()
  if data['code'] == 0:
  self.conversation = data['data']
- PrettyOutput.print("会话设置更新成功", OutputType.SUCCESS)
  return True
  else:
  PrettyOutput.print(f"更新会话设置失败: {data.get('msg', '未知错误')}", OutputType.ERROR)
@@ -149,7 +150,6 @@ class AI8Model(BasePlatform):
  "name": name,
  "data": f"data:image/png;base64,{base64_data}"
  })
- PrettyOutput.print(f"文件 {name} 已准备好发送", OutputType.SUCCESS)

  def set_system_message(self, message: str):
  """Set system message"""
@@ -158,8 +158,6 @@ class AI8Model(BasePlatform):
  def chat(self, message: str) -> str:
  """执行对话"""
  try:
- if not self.suppress_output:
- PrettyOutput.print("发送请求...", OutputType.PROGRESS)

  # 确保有会话ID
  if not self.conversation:
@@ -263,7 +261,6 @@ class AI8Model(BasePlatform):
  if response.status_code == 200:
  data = response.json()
  if data['code'] == 0:
- PrettyOutput.print("会话删除成功", OutputType.SUCCESS)
  self.reset()
  return True
  else:
@@ -10,6 +10,10 @@ class BasePlatform(ABC):
  self.suppress_output = False # 添加输出控制标志
  pass

+ def __del__(self):
+ """销毁模型"""
+ self.delete_chat()
+
  def set_model_name(self, model_name: str):
  """设置模型名称"""
  raise NotImplementedError("set_model_name is not implemented")
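
Session cleanup moves from explicit delete_chat() calls at every call site (removed throughout this diff) into the platform's destructor. A minimal standalone sketch of the idea (hypothetical class; note that __del__ runs when the object is garbage-collected, so the timing is not guaranteed to be deterministic):

    class Session:
        """Owns a remote chat session and tears it down when the object is collected."""

        def __init__(self, session_id: str):
            self.session_id = session_id

        def delete_chat(self) -> None:
            # Placeholder for the HTTP call that removes the remote session.
            print(f"deleting session {self.session_id}")

        def __del__(self):
            # Invoked by the garbage collector; exceptions raised here are suppressed.
            try:
                self.delete_chat()
            except Exception:
                pass

    s = Session("demo")
    del s  # CPython frees it immediately; other runtimes may defer cleanup
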
@@ -162,10 +162,8 @@ class KimiModel(BasePlatform):
  if not file_list:
  return []

- PrettyOutput.print("Progress: 开始处理文件上传...", OutputType.PROGRESS)

  if not self.chat_id:
- PrettyOutput.print("创建新的对话会话...", OutputType.PROGRESS)
  if not self._create_chat():
  raise Exception("Failed to create chat session")

@@ -178,28 +176,23 @@ class KimiModel(BasePlatform):
  action = "image" if mime_type and mime_type.startswith('image/') else "file"

  # 获取预签名URL
- PrettyOutput.print("获取上传URL...", OutputType.PROGRESS)
  presigned_data = self._get_presigned_url(file_path, action)

  # 上传文件
- PrettyOutput.print("上传文件内容...", OutputType.PROGRESS)
  if self._upload_file(file_path, presigned_data["url"]):
  # 获取文件信息
- PrettyOutput.print("获取文件信息...", OutputType.PROGRESS)
+ file_info = self._get_file_info(presigned_data, os.path.basename(file_path), action)
  file_info = self._get_file_info(presigned_data, os.path.basename(file_path), action)
  # 等待文件解析
- PrettyOutput.print("等待文件解析完成...", OutputType.PROGRESS)

  # 只有文件需要解析
  if action == "file":
  if self._wait_for_parse(file_info["id"]):
  uploaded_files.append(file_info)
- PrettyOutput.print(f"Success: 文件处理成功: {file_path}", OutputType.SUCCESS)
  else:
  PrettyOutput.print(f"✗ 文件解析失败: {file_path}", OutputType.ERROR)
  else:
  uploaded_files.append(file_info)
- PrettyOutput.print(f"Success: 文件处理成功: {file_path}", OutputType.SUCCESS)
  else:
  PrettyOutput.print(f"Error: 文件上传失败: {file_path}", OutputType.ERROR)

@@ -207,19 +200,12 @@ class KimiModel(BasePlatform):
  PrettyOutput.print(f"✗ 处理文件出错 {file_path}: {str(e)}", OutputType.ERROR)
  continue

- if uploaded_files:
- PrettyOutput.print(f"成功处理 {len(uploaded_files)}/{len(file_list)} 个文件", OutputType.SUCCESS)
- else:
- PrettyOutput.print("没有文件成功处理", OutputType.ERROR)
-
  self.uploaded_files = uploaded_files
  return uploaded_files

  def chat(self, message: str) -> str:
  """发送消息并获取响应"""
  if not self.chat_id:
- if not self.suppress_output:
- PrettyOutput.print("创建新的对话会话...", OutputType.PROGRESS)
  if not self._create_chat():
  raise Exception("Failed to create chat session")

@@ -230,15 +216,11 @@ class KimiModel(BasePlatform):
  refs_file = []
  if self.first_chat:
  if self.uploaded_files:
- if not self.suppress_output:
- PrettyOutput.print(f"首次对话,引用 {len(self.uploaded_files)} 个文件...", OutputType.PROGRESS)
  refs = [f["id"] for f in self.uploaded_files]
  refs_file = self.uploaded_files
  message = self.system_message + "\n" + message
  self.first_chat = False

- if not self.suppress_output:
- PrettyOutput.print("发送请求...", OutputType.PROGRESS)
  payload = {
  "messages": [{"role": "user", "content": message}],
  "use_search": True,
@@ -263,9 +245,6 @@ class KimiModel(BasePlatform):
  search_results = []
  ref_sources = []

- if not self.suppress_output:
- PrettyOutput.print("接收响应...", OutputType.PROGRESS)
-
  for line in response.iter_lines():
  if not line:
  continue
@@ -382,7 +361,6 @@ class KimiModel(BasePlatform):
  try:
  response = while_success(lambda: requests.delete(url, headers=headers), sleep_time=5)
  if response.status_code == 200:
- PrettyOutput.print("会话已删除", OutputType.SUCCESS)
  self.reset()
  return True
  else:
@@ -54,8 +54,6 @@ class OpenAIModel(BasePlatform):
  def chat(self, message: str) -> str:
  """执行对话"""
  try:
- if not self.suppress_output:
- PrettyOutput.print("发送请求...", OutputType.PROGRESS)

  # 添加用户消息到历史记录
  self.messages.append({"role": "user", "content": message})
@@ -66,8 +64,6 @@ class OpenAIModel(BasePlatform):
  stream=True
  )

- if not self.suppress_output:
- PrettyOutput.print("接收响应...", OutputType.PROGRESS)
  full_response = ""

  for chunk in response:
@@ -11,6 +11,8 @@ class OyiModel(BasePlatform):

  platform_name = "oyi"
  BASE_URL = "https://api-10086.rcouyi.com"
+
+ first_time = True

  def __init__(self):
  """Initialize model"""
@@ -19,11 +21,14 @@ class OyiModel(BasePlatform):

  # 获取可用模型列表
  available_models = self.get_available_models()
- if available_models:
- for model in available_models:
- PrettyOutput.print(model, OutputType.INFO)
- else:
- PrettyOutput.print("获取模型列表失败", OutputType.WARNING)
+
+ if OyiModel.first_time:
+ OyiModel.first_time = False
+ if available_models:
+ for model in available_models:
+ PrettyOutput.print(model, OutputType.INFO)
+ else:
+ PrettyOutput.print("获取模型列表失败", OutputType.WARNING)

  self.messages = []
  self.system_message = ""
@@ -85,7 +90,6 @@ class OyiModel(BasePlatform):
  data = response.json()
  if data['code'] == 200 and data['type'] == 'success':
  self.conversation = data
- PrettyOutput.print(f"创建会话成功: {data['result']['id']}", OutputType.SUCCESS)
  return True
  else:
  PrettyOutput.print(f"创建会话失败: {data['message']}", OutputType.ERROR)
@@ -112,9 +116,6 @@ class OyiModel(BasePlatform):
  str: Model response
  """
  try:
- if not self.suppress_output:
- PrettyOutput.print("发送请求...", OutputType.PROGRESS)
-
  # 确保有会话ID
  if not self.conversation:
  if not self.create_conversation():
@@ -184,7 +185,6 @@ class OyiModel(BasePlatform):

  if response.status_code == 200:
  if not self.suppress_output:
- PrettyOutput.print("接收响应...", OutputType.PROGRESS)
  PrettyOutput.print(response.text, OutputType.SYSTEM)
  self.messages.append({"role": "assistant", "content": response.text})
  return response.text
@@ -232,7 +232,6 @@ class OyiModel(BasePlatform):
  if response.status_code == 200:
  data = response.json()
  if data['code'] == 200 and data['type'] == 'success':
- PrettyOutput.print("会话删除成功", OutputType.SUCCESS)
  self.reset()
  return True
  else:
@@ -294,7 +293,6 @@ class OyiModel(BasePlatform):
  if response.status_code == 200:
  data = response.json()
  if data.get('code') == 200:
- PrettyOutput.print("文件上传成功", OutputType.SUCCESS)
  self.upload_files.append(data)
  return data
  else:
@@ -102,7 +102,6 @@ class ExampleTool:

  # 调用模型生成代码
  response = model.chat(prompt)
- model.delete_chat()

  # 提取代码块
  code_start = response.find("```python")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: jarvis-ai-assistant
- Version: 0.1.82
+ Version: 0.1.84
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
  Home-page: https://github.com/skyfireitdiy/Jarvis
  Author: skyfire