jarvis-ai-assistant 0.1.96__py3-none-any.whl → 0.1.97__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jarvis-ai-assistant might be problematic.

Files changed (41)
  1. jarvis/__init__.py +1 -1
  2. jarvis/agent.py +138 -144
  3. jarvis/jarvis_codebase/main.py +87 -54
  4. jarvis/jarvis_coder/git_utils.py +4 -7
  5. jarvis/jarvis_coder/main.py +17 -22
  6. jarvis/jarvis_coder/patch_handler.py +141 -441
  7. jarvis/jarvis_coder/plan_generator.py +64 -36
  8. jarvis/jarvis_platform/main.py +1 -1
  9. jarvis/jarvis_rag/main.py +1 -1
  10. jarvis/jarvis_smart_shell/main.py +15 -15
  11. jarvis/main.py +24 -24
  12. jarvis/models/ai8.py +22 -22
  13. jarvis/models/base.py +17 -13
  14. jarvis/models/kimi.py +31 -31
  15. jarvis/models/ollama.py +28 -28
  16. jarvis/models/openai.py +22 -24
  17. jarvis/models/oyi.py +25 -25
  18. jarvis/models/registry.py +33 -34
  19. jarvis/tools/ask_user.py +5 -5
  20. jarvis/tools/base.py +2 -2
  21. jarvis/tools/chdir.py +9 -9
  22. jarvis/tools/codebase_qa.py +4 -4
  23. jarvis/tools/coder.py +4 -4
  24. jarvis/tools/file_ops.py +1 -1
  25. jarvis/tools/generator.py +23 -23
  26. jarvis/tools/methodology.py +4 -4
  27. jarvis/tools/rag.py +4 -4
  28. jarvis/tools/registry.py +38 -38
  29. jarvis/tools/search.py +42 -42
  30. jarvis/tools/shell.py +13 -13
  31. jarvis/tools/sub_agent.py +16 -16
  32. jarvis/tools/thinker.py +41 -41
  33. jarvis/tools/webpage.py +17 -17
  34. jarvis/utils.py +59 -60
  35. {jarvis_ai_assistant-0.1.96.dist-info → jarvis_ai_assistant-0.1.97.dist-info}/METADATA +1 -1
  36. jarvis_ai_assistant-0.1.97.dist-info/RECORD +47 -0
  37. jarvis_ai_assistant-0.1.96.dist-info/RECORD +0 -47
  38. {jarvis_ai_assistant-0.1.96.dist-info → jarvis_ai_assistant-0.1.97.dist-info}/LICENSE +0 -0
  39. {jarvis_ai_assistant-0.1.96.dist-info → jarvis_ai_assistant-0.1.97.dist-info}/WHEEL +0 -0
  40. {jarvis_ai_assistant-0.1.96.dist-info → jarvis_ai_assistant-0.1.97.dist-info}/entry_points.txt +0 -0
  41. {jarvis_ai_assistant-0.1.96.dist-info → jarvis_ai_assistant-0.1.97.dist-info}/top_level.txt +0 -0
jarvis/models/kimi.py CHANGED
@@ -9,38 +9,38 @@ from jarvis.utils import PrettyOutput, OutputType
 from jarvis.utils import while_success
 
 class KimiModel(BasePlatform):
-    """Kimi模型实现"""
+    """Kimi model implementation"""
 
     platform_name = "kimi"
 
     def get_model_list(self) -> List[Tuple[str, str]]:
-        """获取模型列表"""
-        return [("kimi", "基于网页Kimi的封装,免费接口")]
+        """Get model list"""
+        return [("kimi", "Based on the web Kimi, free interface")]
 
     def __init__(self):
         """
-        初始化Kimi模型
+        Initialize Kimi model
         """
         super().__init__()
         self.chat_id = ""
         self.api_key = os.getenv("KIMI_API_KEY")
         if not self.api_key:
-            PrettyOutput.print("\n需要设置 KIMI_API_KEY 才能使用 Jarvis。请按以下步骤操作:", OutputType.INFO)
-            PrettyOutput.print("\n1. 获取 Kimi API Key:", OutputType.INFO)
-            PrettyOutput.print(" • 访问 Kimi AI 平台: https://kimi.moonshot.cn", OutputType.INFO)
-            PrettyOutput.print(" • 登录您的账号", OutputType.INFO)
-            PrettyOutput.print(" • 打开浏览器开发者工具 (F12 或右键 -> 检查)", OutputType.INFO)
-            PrettyOutput.print(" • 切换到 Network 标签页", OutputType.INFO)
-            PrettyOutput.print(" • 发送任意消息", OutputType.INFO)
-            PrettyOutput.print(" • 在请求中找到 Authorization 头部", OutputType.INFO)
-            PrettyOutput.print(" • 复制 token 值(去掉 'Bearer ' 前缀)", OutputType.INFO)
-            PrettyOutput.print("\n2. 设置环境变量:", OutputType.INFO)
-            PrettyOutput.print(" 方法 1: 创建或编辑 ~/.jarvis_env 文件:", OutputType.INFO)
+            PrettyOutput.print("\nNeed to set KIMI_API_KEY to use Jarvis. Please follow the steps below:", OutputType.INFO)
+            PrettyOutput.print("\n1. Get Kimi API Key:", OutputType.INFO)
+            PrettyOutput.print(" • Visit Kimi AI platform: https://kimi.moonshot.cn", OutputType.INFO)
+            PrettyOutput.print(" • Login to your account", OutputType.INFO)
+            PrettyOutput.print(" • Open browser developer tools (F12 or right-click -> Inspect)", OutputType.INFO)
+            PrettyOutput.print(" • Switch to the Network tab", OutputType.INFO)
+            PrettyOutput.print(" • Send any message", OutputType.INFO)
+            PrettyOutput.print(" • Find the Authorization header in the request", OutputType.INFO)
+            PrettyOutput.print(" • Copy the token value (remove the 'Bearer ' prefix)", OutputType.INFO)
+            PrettyOutput.print("\n2. Set environment variable:", OutputType.INFO)
+            PrettyOutput.print(" Method 1: Create or edit ~/.jarvis_env file:", OutputType.INFO)
             PrettyOutput.print(" echo 'KIMI_API_KEY=your_key_here' > ~/.jarvis_env", OutputType.INFO)
-            PrettyOutput.print("\n 方法 2: 直接设置环境变量:", OutputType.INFO)
+            PrettyOutput.print("\n Method 2: Set environment variable directly:", OutputType.INFO)
             PrettyOutput.print(" export KIMI_API_KEY=your_key_here", OutputType.INFO)
-            PrettyOutput.print("\n设置完成后重新运行 Jarvis", OutputType.INFO)
-            PrettyOutput.print("KIMI_API_KEY未设置", OutputType.WARNING)
+            PrettyOutput.print("\nAfter setting, run Jarvis again.", OutputType.INFO)
+            PrettyOutput.print("KIMI_API_KEY is not set", OutputType.WARNING)
         self.auth_header = f"Bearer {self.api_key}"
         self.chat_id = ""
         self.uploaded_files = [] # 存储已上传文件的信息
@@ -48,18 +48,18 @@ class KimiModel(BasePlatform):
         self.system_message = ""
 
     def set_system_message(self, message: str):
-        """设置系统消息"""
+        """Set system message"""
         self.system_message = message
 
     def set_model_name(self, model_name: str):
-        """设置模型名称"""
+        """Set model name"""
         pass
 
     def _create_chat(self) -> bool:
-        """创建新的对话会话"""
+        """Create a new chat session"""
        url = "https://kimi.moonshot.cn/api/chat"
        payload = json.dumps({
-            "name": "未命名会话",
+            "name": "Unnamed session",
            "is_example": False,
            "kimiplus_id": "kimi"
        })
@@ -76,7 +76,7 @@ class KimiModel(BasePlatform):
            return False
 
     def _get_presigned_url(self, filename: str, action: str) -> Dict:
-        """获取预签名上传URL"""
+        """Get presigned upload URL"""
        url = "https://kimi.moonshot.cn/api/pre-sign-url"
 
 
@@ -95,7 +95,7 @@ class KimiModel(BasePlatform):
        return response.json()
 
     def _upload_file(self, file_path: str, presigned_url: str) -> bool:
-        """上传文件到预签名URL"""
+        """Upload file to presigned URL"""
        try:
            with open(file_path, 'rb') as f:
                content = f.read()
@@ -106,7 +106,7 @@ class KimiModel(BasePlatform):
            return False
 
     def _get_file_info(self, file_data: Dict, name: str, file_type: str) -> Dict:
-        """获取文件信息"""
+        """Get file information"""
        url = "https://kimi.moonshot.cn/api/file"
        payload = json.dumps({
            "type": file_type,
@@ -125,7 +125,7 @@ class KimiModel(BasePlatform):
        return response.json()
 
     def _wait_for_parse(self, file_id: str) -> bool:
-        """等待文件解析完成"""
+        """Wait for file parsing to complete"""
        url = "https://kimi.moonshot.cn/api/file/parse_process"
        headers = {
            'Authorization': self.auth_header,
@@ -163,7 +163,7 @@ class KimiModel(BasePlatform):
 
        return False
     def upload_files(self, file_list: List[str]) -> List[Dict]:
-        """上传文件列表并返回文件信息"""
+        """Upload file list and return file information"""
        if not file_list:
            return []
 
@@ -209,7 +209,7 @@ class KimiModel(BasePlatform):
        return uploaded_files
 
     def chat(self, message: str) -> str:
-        """发送消息并获取响应"""
+        """Send message and get response"""
        if not self.chat_id:
            if not self._create_chat():
                raise Exception("Failed to create chat session")
@@ -353,7 +353,7 @@ class KimiModel(BasePlatform):
            raise Exception(f"Chat failed: {str(e)}")
 
     def delete_chat(self) -> bool:
-        """删除当前会话"""
+        """Delete current session"""
        if not self.chat_id:
            return True # 如果没有会话ID,视为删除成功
 
@@ -376,11 +376,11 @@ class KimiModel(BasePlatform):
            return False
 
     def reset(self):
-        """重置对话"""
+        """Reset chat"""
        self.chat_id = ""
        self.uploaded_files = []
        self.first_chat = True # 重置first_chat标记
 
     def name(self) -> str:
-        """模型名称"""
+        """Model name"""
        return "kimi"
jarvis/models/ollama.py CHANGED
@@ -6,56 +6,56 @@ import os
 import json
 
 class OllamaPlatform(BasePlatform):
-    """Ollama 平台实现"""
+    """Ollama platform implementation"""
 
     platform_name = "ollama"
 
     def __init__(self):
-        """初始化模型"""
+        """Initialize model"""
        super().__init__()
 
-        # 检查环境变量并提供帮助信息
+        # Check environment variables and provide help information
        self.api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434")
        self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-r1:1.5b"
 
-        # 检查 Ollama 服务是否可用
+        # Check if Ollama service is available
        try:
            response = requests.get(f"{self.api_base}/api/tags")
            response.raise_for_status()
            available_models = [model["name"] for model in response.json().get("models", [])]
 
            if not available_models:
-                PrettyOutput.print("\n需要先下载 Ollama 模型才能使用:", OutputType.INFO)
-                PrettyOutput.print("1. 安装 Ollama: https://ollama.ai", OutputType.INFO)
-                PrettyOutput.print("2. 下载模型:", OutputType.INFO)
+                PrettyOutput.print("\nNeed to download Ollama model first to use:", OutputType.INFO)
+                PrettyOutput.print("1. Install Ollama: https://ollama.ai", OutputType.INFO)
+                PrettyOutput.print("2. Download model:", OutputType.INFO)
                PrettyOutput.print(f" ollama pull {self.model_name}", OutputType.INFO)
-                PrettyOutput.print("Ollama没有可用的模型", OutputType.WARNING)
+                PrettyOutput.print("Ollama has no available models", OutputType.WARNING)
 
        except requests.exceptions.ConnectionError:
-            PrettyOutput.print("\nOllama 服务未启动或无法连接", OutputType.WARNING)
-            PrettyOutput.print("请确保已经:", OutputType.INFO)
-            PrettyOutput.print("1. 安装了 Ollama: https://ollama.ai", OutputType.INFO)
-            PrettyOutput.print("2. 启动了 Ollama 服务", OutputType.INFO)
-            PrettyOutput.print("3. 服务地址配置正确 (默认: http://localhost:11434)", OutputType.INFO)
+            PrettyOutput.print("\nOllama service is not started or cannot be connected", OutputType.WARNING)
+            PrettyOutput.print("Please ensure that you have:", OutputType.INFO)
+            PrettyOutput.print("1. Installed Ollama: https://ollama.ai", OutputType.INFO)
+            PrettyOutput.print("2. Started Ollama service", OutputType.INFO)
+            PrettyOutput.print("3. Service address configured correctly (default: http://localhost:11434)", OutputType.INFO)
 
 
        self.messages = []
        self.system_message = ""
 
     def get_model_list(self) -> List[Tuple[str, str]]:
-        """获取模型列表"""
+        """Get model list"""
        response = requests.get(f"{self.api_base}/api/tags")
        response.raise_for_status()
        return [(model["name"], "") for model in response.json().get("models", [])]
 
     def set_model_name(self, model_name: str):
-        """设置模型名称"""
+        """Set model name"""
        self.model_name = model_name
 
     def chat(self, message: str) -> str:
-        """执行对话"""
+        """Execute conversation"""
        try:
-            # 构建消息列表
+            # Build message list
            messages = []
            if self.system_message:
                messages.append({"role": "system", "content": self.system_message})
@@ -102,31 +102,31 @@ class OllamaPlatform(BasePlatform):
            return full_response
 
        except Exception as e:
-            PrettyOutput.print(f"对话失败: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"Chat failed: {str(e)}", OutputType.ERROR)
            raise Exception(f"Chat failed: {str(e)}")
 
     def upload_files(self, file_list: List[str]) -> List[Dict]:
-        """上传文件 (Ollama 不支持文件上传)"""
-        PrettyOutput.print("Ollama 不支持文件上传", output_type=OutputType.WARNING)
+        """Upload files (Ollama does not support file upload)"""
+        PrettyOutput.print("Ollama does not support file upload", output_type=OutputType.WARNING)
        return []
 
     def reset(self):
-        """重置模型状态"""
+        """Reset model state"""
        self.messages = []
        if self.system_message:
            self.messages.append({"role": "system", "content": self.system_message})
 
     def name(self) -> str:
-        """返回模型名称"""
+        """Return model name"""
        return self.model_name
 
     def delete_chat(self) -> bool:
-        """删除当前聊天会话"""
+        """Delete current chat session"""
        self.reset()
        return True
 
     def set_system_message(self, message: str):
-        """设置系统消息"""
+        """Set system message"""
        self.system_message = message
        self.reset() # 重置会话以应用新的系统消息
 
@@ -136,10 +136,10 @@ if __name__ == "__main__":
     ollama = OllamaPlatform()
     while True:
        try:
-            message = input("\n输入问题(Ctrl+C退出): ")
-            ollama.chat(message)
+            message = input("\nInput question (Ctrl+C to exit): ")
+            ollama.chat_until_success(message)
        except KeyboardInterrupt:
-            print("\n再见!")
+            print("\nGoodbye!")
            break
        except Exception as e:
-            PrettyOutput.print(f"程序异常退出: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"Program exited with an exception: {str(e)}", OutputType.ERROR)
jarvis/models/openai.py CHANGED
@@ -5,35 +5,33 @@ from jarvis.models.base import BasePlatform
 from jarvis.utils import PrettyOutput, OutputType
 
 class OpenAIModel(BasePlatform):
-    """DeepSeek模型实现"""
-
     platform_name = "openai"
 
     def upload_files(self, file_list: List[str]):
-        """上传文件"""
-        PrettyOutput.print("OpenAI 不支持上传文件", OutputType.WARNING)
+        """Upload files"""
+        PrettyOutput.print("OpenAI does not support file upload", OutputType.WARNING)
 
     def __init__(self):
        """
-        初始化DeepSeek模型
+        Initialize OpenAI model
        """
        super().__init__()
        self.system_message = ""
        self.api_key = os.getenv("OPENAI_API_KEY")
        if not self.api_key:
-            PrettyOutput.print("\n需要设置以下环境变量才能使用 OpenAI 模型:", OutputType.INFO)
-            PrettyOutput.print(" • OPENAI_API_KEY: API 密钥", OutputType.INFO)
-            PrettyOutput.print(" • OPENAI_API_BASE: (可选) API 基础地址,默认使用 https://api.openai.com/v1", OutputType.INFO)
-            PrettyOutput.print("\n可以通过以下方式设置:", OutputType.INFO)
-            PrettyOutput.print("1. 创建或编辑 ~/.jarvis_env 文件:", OutputType.INFO)
+            PrettyOutput.print("\nNeed to set the following environment variables to use OpenAI model:", OutputType.INFO)
+            PrettyOutput.print(" • OPENAI_API_KEY: API key", OutputType.INFO)
+            PrettyOutput.print(" • OPENAI_API_BASE: (optional) API base address, default using https://api.openai.com/v1", OutputType.INFO)
+            PrettyOutput.print("\nYou can set them in the following ways:", OutputType.INFO)
+            PrettyOutput.print("1. Create or edit ~/.jarvis_env file:", OutputType.INFO)
            PrettyOutput.print(" OPENAI_API_KEY=your_api_key", OutputType.INFO)
            PrettyOutput.print(" OPENAI_API_BASE=your_api_base", OutputType.INFO)
            PrettyOutput.print(" OPENAI_MODEL_NAME=your_model_name", OutputType.INFO)
-            PrettyOutput.print("\n2. 或者直接设置环境变量:", OutputType.INFO)
+            PrettyOutput.print("\n2. Or set the environment variables directly:", OutputType.INFO)
            PrettyOutput.print(" export OPENAI_API_KEY=your_api_key", OutputType.INFO)
            PrettyOutput.print(" export OPENAI_API_BASE=your_api_base", OutputType.INFO)
            PrettyOutput.print(" export OPENAI_MODEL_NAME=your_model_name", OutputType.INFO)
-            PrettyOutput.print("OPENAI_API_KEY未设置", OutputType.WARNING)
+            PrettyOutput.print("OPENAI_API_KEY is not set", OutputType.WARNING)
 
        self.base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
        self.model_name = os.getenv("JARVIS_MODEL") or "gpt-4o"
@@ -47,28 +45,28 @@ class OpenAIModel(BasePlatform):
        self.system_message = ""
 
     def get_model_list(self) -> List[Tuple[str, str]]:
-        """获取模型列表"""
+        """Get model list"""
        return []
 
     def set_model_name(self, model_name: str):
-        """设置模型名称"""
+        """Set model name"""
 
        self.model_name = model_name
 
     def set_system_message(self, message: str):
-        """设置系统消息"""
+        """Set system message"""
        self.system_message = message
        self.messages.append({"role": "system", "content": self.system_message})
 
     def chat(self, message: str) -> str:
-        """执行对话"""
+        """Execute conversation"""
        try:
 
-            # 添加用户消息到历史记录
+            # Add user message to history
            self.messages.append({"role": "user", "content": message})
 
            response = self.client.chat.completions.create(
-                model=self.model_name, # 使用配置的模型名称
+                model=self.model_name, # Use the configured model name
                messages=self.messages, # type: ignore
                stream=True
            ) # type: ignore
@@ -85,28 +83,28 @@ class OpenAIModel(BasePlatform):
            if not self.suppress_output:
                PrettyOutput.print_stream_end()
 
-            # 添加助手回复到历史记录
+            # Add assistant reply to history
            self.messages.append({"role": "assistant", "content": full_response})
 
            return full_response
 
        except Exception as e:
-            PrettyOutput.print(f"对话失败: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"Chat failed: {str(e)}", OutputType.ERROR)
            raise Exception(f"Chat failed: {str(e)}")
 
     def name(self) -> str:
-        """返回模型名称"""
+        """Return model name"""
        return self.model_name
 
     def reset(self):
-        """重置模型状态"""
-        # 清空对话历史,只保留system message
+        """Reset model state"""
+        # Clear conversation history, only keep system message
        if self.system_message:
            self.messages = [{"role": "system", "content": self.system_message}]
        else:
            self.messages = []
 
     def delete_chat(self)->bool:
-        """删除对话"""
+        """Delete conversation"""
        self.reset()
        return True
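
A minimal sketch of the class after these changes (values are placeholders; OPENAI_API_KEY must already be set, as the error messages above explain):

    from jarvis.models.openai import OpenAIModel

    model = OpenAIModel()               # reads OPENAI_API_KEY / OPENAI_API_BASE / JARVIS_MODEL
    model.set_model_name("gpt-4o")      # the diff's default when JARVIS_MODEL is unset
    model.set_system_message("You are Jarvis.")
    answer = model.chat("Say hello.")   # streams via chat.completions and returns the full reply
    model.reset()                       # keeps only the system message in history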
jarvis/models/oyi.py CHANGED
@@ -13,7 +13,7 @@ class OyiModel(BasePlatform):
     BASE_URL = "https://api-10086.rcouyi.com"
 
     def get_model_list(self) -> List[Tuple[str, str]]:
-        """获取模型列表"""
+        """Get model list"""
        return [(name,info['desc']) for name,info in self.models.items()]
 
     def __init__(self):
@@ -28,15 +28,15 @@ class OyiModel(BasePlatform):
 
        self.token = os.getenv("OYI_API_KEY")
        if not self.token:
-            PrettyOutput.print("OYI_API_KEY未设置", OutputType.WARNING)
+            PrettyOutput.print("OYI_API_KEY is not set", OutputType.WARNING)
 
        self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-chat"
        if self.model_name not in [m.split()[0] for m in self.get_available_models()]:
-            PrettyOutput.print(f"警告: 当前选择的模型 {self.model_name} 不在可用列表中", OutputType.WARNING)
+            PrettyOutput.print(f"Warning: The selected model {self.model_name} is not in the available list", OutputType.WARNING)
 
 
     def set_model_name(self, model_name: str):
-        """设置模型名称"""
+        """Set model name"""
 
        self.model_name = model_name
 
@@ -54,7 +54,7 @@ class OyiModel(BasePlatform):
        payload = {
            "id": 0,
            "roleId": 0,
-            "title": "新对话",
+            "title": "New conversation",
            "isLock": False,
            "systemMessage": "",
            "params": json.dumps({
@@ -82,14 +82,14 @@ class OyiModel(BasePlatform):
                    self.conversation = data
                    return True
                else:
-                    PrettyOutput.print(f"创建会话失败: {data['message']}", OutputType.ERROR)
+                    PrettyOutput.print(f"Create conversation failed: {data['message']}", OutputType.ERROR)
                    return False
            else:
-                PrettyOutput.print(f"创建会话失败: {response.status_code}", OutputType.ERROR)
+                PrettyOutput.print(f"Create conversation failed: {response.status_code}", OutputType.ERROR)
                return False
 
        except Exception as e:
-            PrettyOutput.print(f"创建会话异常: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"Create conversation failed: {str(e)}", OutputType.ERROR)
            return False
 
     def set_system_message(self, message: str):
@@ -155,13 +155,13 @@ class OyiModel(BasePlatform):
            )
 
            if response.status_code != 200:
-                error_msg = f"聊天请求失败: {response.status_code}"
+                error_msg = f"Chat request failed: {response.status_code}"
                PrettyOutput.print(error_msg, OutputType.ERROR)
                raise Exception(error_msg)
 
            data = response.json()
            if data['code'] != 200 or data['type'] != 'success':
-                error_msg = f"聊天失败: {data.get('message', '未知错误')}"
+                error_msg = f"Chat failed: {data.get('message', 'Unknown error')}"
                PrettyOutput.print(error_msg, OutputType.ERROR)
                raise Exception(error_msg)
 
@@ -179,12 +179,12 @@ class OyiModel(BasePlatform):
                self.messages.append({"role": "assistant", "content": response.text})
                return response.text
            else:
-                error_msg = f"获取响应失败: {response.status_code}"
+                error_msg = f"Get response failed: {response.status_code}"
                PrettyOutput.print(error_msg, OutputType.ERROR)
                raise Exception(error_msg)
 
        except Exception as e:
-            PrettyOutput.print(f"聊天异常: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"Chat failed: {str(e)}", OutputType.ERROR)
            raise e
 
     def name(self) -> str:
@@ -225,16 +225,16 @@ class OyiModel(BasePlatform):
                    self.reset()
                    return True
                else:
-                    error_msg = f"删除会话失败: {data.get('message', '未知错误')}"
+                    error_msg = f"Delete conversation failed: {data.get('message', 'Unknown error')}"
                    PrettyOutput.print(error_msg, OutputType.ERROR)
                    return False
            else:
-                error_msg = f"删除会话请求失败: {response.status_code}"
+                error_msg = f"Delete conversation request failed: {response.status_code}"
                PrettyOutput.print(error_msg, OutputType.ERROR)
                return False
 
        except Exception as e:
-            PrettyOutput.print(f"删除会话异常: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"Delete conversation failed: {str(e)}", OutputType.ERROR)
            return False
 
     def upload_files(self, file_list: List[str]) -> List[Dict]:
@@ -250,7 +250,7 @@ class OyiModel(BasePlatform):
            # 检查当前模型是否支持文件上传
            model_info = self.models.get(self.model_name)
            if not model_info or not model_info.get('uploadFile', False):
-                PrettyOutput.print(f"当前模型 {self.model_name} 不支持文件上传", OutputType.WARNING)
+                PrettyOutput.print(f"The current model {self.model_name} does not support file upload", OutputType.WARNING)
                return []
 
            headers = {
@@ -266,7 +266,7 @@ class OyiModel(BasePlatform):
                # 检查文件类型
                file_type = mimetypes.guess_type(file_path)[0]
                if not file_type or not file_type.startswith(('image/', 'text/', 'application/')):
-                    PrettyOutput.print(f"文件类型不支持: {file_type}", OutputType.ERROR)
+                    PrettyOutput.print(f"The file type {file_type} is not supported", OutputType.ERROR)
                    continue
 
                with open(file_path, 'rb') as f:
@@ -285,22 +285,22 @@ class OyiModel(BasePlatform):
                    if data.get('code') == 200:
                        self.files.append(data)
                    else:
-                        PrettyOutput.print(f"文件上传失败: {data.get('message')}", OutputType.ERROR)
+                        PrettyOutput.print(f"File upload failed: {data.get('message')}", OutputType.ERROR)
                        return []
                else:
-                    PrettyOutput.print(f"文件上传失败: {response.status_code}", OutputType.ERROR)
+                    PrettyOutput.print(f"File upload failed: {response.status_code}", OutputType.ERROR)
                    return []
 
            return self.files
        except Exception as e:
-            PrettyOutput.print(f"文件上传异常: {str(e)}", OutputType.ERROR)
+            PrettyOutput.print(f"File upload failed: {str(e)}", OutputType.ERROR)
            return []
 
     def get_available_models(self) -> List[str]:
-        """获取可用的模型列表
+        """Get available model list
 
        Returns:
-            List[str]: 可用模型名称列表
+            List[str]: Available model name list
        """
        try:
            if self.models:
@@ -320,7 +320,7 @@ class OyiModel(BasePlatform):
            )
 
            if response.status_code != 200:
-                PrettyOutput.print(f"获取模型列表失败: {response.status_code}", OutputType.ERROR)
+                PrettyOutput.print(f"Get model list failed: {response.status_code}", OutputType.ERROR)
                return []
 
            data = response.json()
@@ -357,12 +357,12 @@ class OyiModel(BasePlatform):
 
                # 添加文件上传支持标记
                if model.get('uploadFile'):
-                    model_str += " [支持文件上传]"
+                    model_str += " [Support file upload]"
                model['desc'] = model_str
                models.append(model_name)
 
            return sorted(models)
 
        except Exception as e:
-            PrettyOutput.print(f"获取模型列表异常: {str(e)}", OutputType.WARNING)
+            PrettyOutput.print(f"Get model list failed: {str(e)}", OutputType.WARNING)
            return []
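
A minimal usage sketch of OyiModel with the translated messages (assumptions: OYI_API_KEY is set, and chat() behaves like the other BasePlatform implementations shown in this diff; the model name is the diff's default):

    from jarvis.models.oyi import OyiModel

    model = OyiModel()                     # warns if OYI_API_KEY is not set
    print(model.get_available_models())    # names fetched from the Oyi platform
    model.set_model_name("deepseek-chat")  # default per the diff
    reply = model.chat("Hello")            # assumed chat entry point, mirroring the other platforms
    model.delete_chat()                    # removes the remote conversation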