jarvis-ai-assistant 0.1.211__py3-none-any.whl → 0.1.213__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jarvis/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  """Jarvis AI Assistant"""
3
3
 
4
- __version__ = "0.1.211"
4
+ __version__ = "0.1.213"
jarvis/jarvis_platform/ai8.py ADDED
@@ -0,0 +1,263 @@
1
+ import os
2
+ from typing import Generator, List, Tuple
3
+ from jarvis.jarvis_platform.base import BasePlatform
4
+ import json
5
+
6
+ from jarvis.jarvis_utils import http
7
+ from jarvis.jarvis_utils.output import OutputType, PrettyOutput
8
+ from jarvis.jarvis_utils.utils import while_success
9
+
10
+
11
+ class AI8Model(BasePlatform):
12
+ """AI8 model implementation"""
13
+
14
+ platform_name = "ai8"
15
+ BASE_URL = "https://ai8.rcouyi.com"
16
+
17
+ def get_model_list(self) -> List[Tuple[str, str]]:
18
+ """获取模型列表"""
19
+ self.get_available_models()
20
+ return [(name, info["desc"]) for name, info in self.models.items()]
21
+
22
+ def __init__(self):
23
+ """Initialize model"""
24
+ super().__init__()
25
+ self.system_prompt = ""
26
+ self.conversation = {}
27
+ self.models = {} # 存储模型信息
28
+
29
+ self.token = os.getenv("AI8_API_KEY")
30
+ if not self.token:
31
+ PrettyOutput.print("未设置 AI8_API_KEY", OutputType.WARNING)
32
+
33
+ self.headers = {
34
+ "Authorization": self.token,
35
+ "Content-Type": "application/json",
36
+ "Accept": "application/json, text/plain, */*",
37
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
38
+ "X-APP-VERSION": "2.3.0",
39
+ "Origin": self.BASE_URL,
40
+ "Referer": f"{self.BASE_URL}/chat?_userMenuKey=chat",
41
+ "Sec-Fetch-Site": "same-origin",
42
+ "Sec-Fetch-Mode": "cors",
43
+ "Sec-Fetch-Dest": "empty",
44
+ }
45
+
46
+ self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-chat"
47
+ if self.model_name not in self.get_available_models():
48
+ PrettyOutput.print(
49
+ f"警告: 选择的模型 {self.model_name} 不在可用列表中", OutputType.WARNING
50
+ )
51
+
52
+ def set_model_name(self, model_name: str):
53
+ """Set model name"""
54
+
55
+ self.model_name = model_name
56
+
57
+ def create_conversation(self) -> bool:
58
+ """Create a new conversation"""
59
+ try:
60
+
61
+ # 1. 创建会话
62
+ response = while_success(
63
+ lambda: http.post(
64
+ f"{self.BASE_URL}/api/chat/session", headers=self.headers, json={}
65
+ ),
66
+ sleep_time=5,
67
+ )
68
+
69
+ data = response.json()
70
+ if data["code"] != 0:
71
+ PrettyOutput.print(
72
+ f"创建会话失败: {data.get('msg', '未知错误')}", OutputType.WARNING
73
+ )
74
+ return False
75
+
76
+ self.conversation = data["data"]
77
+
78
+ # 2. 更新会话设置
79
+ session_data = {
80
+ **self.conversation,
81
+ "model": self.model_name,
82
+ "contextCount": 65536,
83
+ "prompt": self.system_prompt,
84
+ "plugins": [],
85
+ "localPlugins": None,
86
+ "useAppId": 0,
87
+ }
88
+
89
+ response = while_success(
90
+ lambda: http.put(
91
+ f"{self.BASE_URL}/api/chat/session/{self.conversation['id']}", # type: ignore
92
+ headers=self.headers,
93
+ json=session_data,
94
+ ),
95
+ sleep_time=5,
96
+ )
97
+
98
+ data = response.json()
99
+ if data["code"] == 0:
100
+ self.conversation = data["data"]
101
+ return True
102
+ else:
103
+ PrettyOutput.print(
104
+ f"更新会话设置失败: {data.get('msg', '未知错误')}",
105
+ OutputType.WARNING,
106
+ )
107
+ return False
108
+
109
+ except Exception as e:
110
+ PrettyOutput.print(f"创建会话失败: {str(e)}", OutputType.ERROR)
111
+ return False
112
+
113
+ def set_system_prompt(self, message: str):
114
+ """Set system message"""
115
+ self.system_prompt = message
116
+
117
+ def chat(self, message: str) -> Generator[str, None, None]:
118
+ """Execute conversation"""
119
+ try:
120
+
121
+ # 确保有会话ID
122
+ if not self.conversation:
123
+ if not self.create_conversation():
124
+ raise Exception("Failed to create conversation")
125
+
126
+ payload = {
127
+ "text": message,
128
+ "sessionId": self.conversation["id"] if self.conversation else None,
129
+ "files": [],
130
+ }
131
+
132
+ # 使用stream_post进行流式请求
133
+ response_stream = while_success(
134
+ lambda: http.stream_post(
135
+ f"{self.BASE_URL}/api/chat/completions",
136
+ headers=self.headers,
137
+ json=payload,
138
+ ),
139
+ sleep_time=5,
140
+ )
141
+
142
+ # 处理流式响应
143
+ for chunk in response_stream:
144
+ if chunk:
145
+ try:
146
+ line = chunk.decode("utf-8")
147
+ if line.startswith("data: "):
148
+ try:
149
+ data = json.loads(line[6:])
150
+ if data.get("type") == "string":
151
+ chunk_data = data.get("data", "")
152
+ if chunk_data:
153
+ yield chunk_data
154
+
155
+ except json.JSONDecodeError:
156
+ continue
157
+
158
+ except UnicodeDecodeError:
159
+ continue
160
+
161
+ return None
162
+
163
+ except Exception as e:
164
+ PrettyOutput.print(f"对话异常: {str(e)}", OutputType.ERROR)
165
+ raise e
166
+
167
+ def name(self) -> str:
168
+ """Return model name"""
169
+ return self.model_name
170
+
171
+ def delete_chat(self) -> bool:
172
+ """Delete current chat session"""
173
+ try:
174
+ if not self.conversation:
175
+ return True
176
+
177
+ response = while_success(
178
+ lambda: http.delete(
179
+ f"{self.BASE_URL}/api/chat/session/{self.conversation['id']}", # type: ignore
180
+ headers=self.headers,
181
+ ),
182
+ sleep_time=5,
183
+ )
184
+
185
+ data = response.json()
186
+ if data["code"] == 0:
187
+ self.conversation = None
188
+ return True
189
+ else:
190
+ error_msg = f"删除会话失败: {data.get('msg', '未知错误')}"
191
+ PrettyOutput.print(error_msg, OutputType.WARNING)
192
+ return False
193
+
194
+ except Exception as e:
195
+ PrettyOutput.print(f"删除会话失败: {str(e)}", OutputType.ERROR)
196
+ return False
197
+
198
+ def get_available_models(self) -> List[str]:
199
+ """Get available model list
200
+
201
+ Returns:
202
+ List[str]: Available model name list
203
+ """
204
+ try:
205
+ if self.models:
206
+ return list(self.models.keys())
207
+
208
+ response = while_success(
209
+ lambda: http.get(
210
+ f"{self.BASE_URL}/api/chat/tmpl", headers=self.headers
211
+ ),
212
+ sleep_time=5,
213
+ )
214
+
215
+ data = response.json()
216
+ if data["code"] != 0:
217
+ PrettyOutput.print(
218
+ f"获取模型列表失败: {data.get('msg', '未知错误')}",
219
+ OutputType.WARNING,
220
+ )
221
+ return []
222
+
223
+ # 保存模型信息
224
+ self.models = {model["value"]: model for model in data["data"]["models"]}
225
+
226
+ for model in self.models.values():
227
+ # 添加标签
228
+ model_str = f"{model['label']}"
229
+
230
+ # 添加特性标记
231
+ features = []
232
+ if model["attr"].get("multimodal"):
233
+ features.append("Multimodal")
234
+ if model["attr"].get("plugin"):
235
+ features.append("Plugin support")
236
+ if model["attr"].get("onlyImg"):
237
+ features.append("Image support")
238
+ if model["attr"].get("tag"):
239
+ features.append(model["attr"]["tag"])
240
+ if model["attr"].get("integral"):
241
+ features.append(model["attr"]["integral"])
242
+ # 添加备注
243
+ if model["attr"].get("note"):
244
+ model_str += f" - {model['attr']['note']}"
245
+ if features:
246
+ model_str += f" [{'|'.join(features)}]"
247
+
248
+ model["desc"] = model_str
249
+
250
+ return list(self.models.keys())
251
+
252
+ except Exception as e:
253
+ PrettyOutput.print(f"获取模型列表失败: {str(e)}", OutputType.ERROR)
254
+ return []
255
+
256
+ def support_upload_files(self) -> bool:
257
+ return False
258
+
259
+ def support_web(self) -> bool:
260
+ return False
261
+
262
+ def upload_files(self, file_list: List[str]) -> bool:
263
+ return False
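
For orientation, here is a minimal usage sketch of the new AI8 platform added above. It is illustrative only and not part of the diff; it assumes AI8_API_KEY is exported and uses only methods that appear in the hunk.

```python
from jarvis.jarvis_platform.ai8 import AI8Model

model = AI8Model()                       # warns if AI8_API_KEY is missing
model.set_model_name("deepseek-chat")    # the default when JARVIS_MODEL is unset
model.set_system_prompt("You are a helpful assistant.")

# chat() lazily creates a session, then yields text chunks parsed from the
# "data: {...}" SSE lines of the streaming response.
for chunk in model.chat("Hello"):
    print(chunk, end="", flush=True)

model.delete_chat()                      # tear down the server-side session
```
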
jarvis/jarvis_platform/base.py CHANGED
@@ -166,6 +166,9 @@ class BasePlatform(ABC):
166
166
  result: str = while_true(
167
167
  lambda: while_success(lambda: self._chat(message), 5), 5
168
168
  )
169
+ from jarvis.jarvis_utils.globals import set_last_message
170
+
171
+ set_last_message(result)
169
172
  return result
170
173
  finally:
171
174
  set_in_chat(False)
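
The base-platform change above records every completed chat result via the new globals helper. A minimal sketch of how that value is read back elsewhere (illustrative only; both functions are defined in the jarvis/jarvis_utils/globals.py hunk below, and the Ctrl+O binding in input.py uses the getter the same way):

```python
from jarvis.jarvis_utils.globals import get_last_message, set_last_message

set_last_message("final model reply")   # the BasePlatform hunk above does this after each chat
# Later, e.g. from a key binding, the last reply is available without
# threading it through call arguments:
print(get_last_message())               # -> "final model reply"
```
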
jarvis/jarvis_platform/kimi.py CHANGED
@@ -151,34 +151,43 @@ class KimiModel(BasePlatform):
151
151
  retry_count = 0
152
152
 
153
153
  while retry_count < max_retries:
154
- payload = json.dumps({"ids": [file_id]}, ensure_ascii=False)
155
- response = while_success(
156
- lambda: http.post(url, headers=headers, data=payload, stream=True),
154
+ payload = {"ids": [file_id]}
155
+ response_stream = while_success(
156
+ lambda: http.stream_post(url, headers=headers, json=payload),
157
157
  sleep_time=5,
158
158
  )
159
159
 
160
- for line in response.iter_lines():
161
- if not line:
162
- continue
163
-
164
- # httpx 返回字符串,requests 返回字节,需要兼容处理
165
- if isinstance(line, bytes):
166
- line = line.decode("utf-8")
167
- else:
168
- line = str(line)
169
-
170
- if not line.startswith("data: "):
171
- continue
160
+ response_data = b""
161
+
162
+ # 处理流式响应
163
+ for chunk in response_stream:
164
+ response_data += chunk
172
165
 
166
+ # 尝试解析SSE格式的数据
173
167
  try:
174
- data = json.loads(line[6:])
175
- if data.get("event") == "resp":
176
- status = data.get("file_info", {}).get("status")
177
- if status == "parsed":
178
- return True
179
- elif status == "failed":
180
- return False
181
- except json.JSONDecodeError:
168
+ # 查找完整的数据行
169
+ lines = response_data.decode("utf-8").split("\n")
170
+ response_data = b"" # 重置缓冲区
171
+
172
+ for line in lines:
173
+ if not line.strip():
174
+ continue
175
+
176
+ # SSE格式的行通常以"data: "开头
177
+ if line.startswith("data: "):
178
+ try:
179
+ data = json.loads(line[6:])
180
+ if data.get("event") == "resp":
181
+ status = data.get("file_info", {}).get("status")
182
+ if status == "parsed":
183
+ return True
184
+ elif status == "failed":
185
+ return False
186
+ except json.JSONDecodeError:
187
+ continue
188
+
189
+ except UnicodeDecodeError:
190
+ # 如果解码失败,继续累积数据
182
191
  continue
183
192
 
184
193
  retry_count += 1
@@ -284,34 +293,44 @@ class KimiModel(BasePlatform):
284
293
  }
285
294
 
286
295
  try:
287
- response = while_success(
288
- lambda: http.post(url, headers=headers, json=payload, stream=True),
296
+ # 使用新的stream_post接口发送消息请求,获取流式响应
297
+ response_stream = while_success(
298
+ lambda: http.stream_post(url, headers=headers, json=payload),
289
299
  sleep_time=5,
290
300
  )
291
- # 如果禁止输出,则静默处理
292
- for line in response.iter_lines():
293
- if not line:
294
- continue
295
-
296
- # httpx 返回字符串,requests 返回字节,需要兼容处理
297
- if isinstance(line, bytes):
298
- line = line.decode("utf-8")
299
- else:
300
- line = str(line)
301
-
302
- if not line.startswith("data: "):
303
- continue
304
-
301
+
302
+ response_data = b""
303
+
304
+ # 处理流式响应
305
+ for chunk in response_stream:
306
+ response_data += chunk
307
+
308
+ # 尝试解析SSE格式的数据
305
309
  try:
306
- data = json.loads(line[6:])
307
- event = data.get("event")
308
-
309
- if event == "cmpl":
310
- # 处理补全文本
311
- text = data.get("text", "")
312
- if text:
313
- yield text
314
- except json.JSONDecodeError:
310
+ # 查找完整的数据行
311
+ lines = response_data.decode("utf-8").split("\n")
312
+ response_data = b"" # 重置缓冲区
313
+
314
+ for line in lines:
315
+ if not line.strip():
316
+ continue
317
+
318
+ # SSE格式的行通常以"data: "开头
319
+ if line.startswith("data: "):
320
+ try:
321
+ data = json.loads(line[6:])
322
+ event = data.get("event")
323
+
324
+ if event == "cmpl":
325
+ # 处理补全文本
326
+ text = data.get("text", "")
327
+ if text:
328
+ yield text
329
+ except json.JSONDecodeError:
330
+ continue
331
+
332
+ except UnicodeDecodeError:
333
+ # 如果解码失败,继续累积数据
315
334
  continue
316
335
 
317
336
  return None
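
Both Kimi hunks replace requests-style iter_lines() with manual accumulation of the raw byte chunks yielded by http.stream_post(), decoding and splitting them into "data: ..." SSE lines. A standalone sketch of that buffering pattern, for illustration only (the helper name and signature are hypothetical, not part of the package):

```python
import json
from typing import Generator, Iterable


def iter_sse_json(chunks: Iterable[bytes]) -> Generator[dict, None, None]:
    """Yield the JSON payload of each "data: {...}" line in an SSE byte stream."""
    buffer = b""
    for chunk in chunks:
        buffer += chunk
        try:
            lines = buffer.decode("utf-8").split("\n")
        except UnicodeDecodeError:
            # A multi-byte character was split across chunks: keep accumulating.
            continue
        buffer = b""  # reset once the accumulated bytes decoded cleanly
        for line in lines:
            if not line.strip():
                continue
            if line.startswith("data: "):
                try:
                    yield json.loads(line[6:])
                except json.JSONDecodeError:
                    continue
```
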
jarvis/jarvis_platform/oyi.py ADDED
@@ -0,0 +1,307 @@
1
+ import mimetypes
2
+ import os
3
+ from typing import Dict, Generator, List, Tuple
4
+ from jarvis.jarvis_platform.base import BasePlatform
5
+ import json
6
+
7
+ from jarvis.jarvis_utils import http
8
+ from jarvis.jarvis_utils.output import OutputType, PrettyOutput
9
+ from jarvis.jarvis_utils.utils import while_success
10
+
11
+
12
+ class OyiModel(BasePlatform):
13
+ """Oyi model implementation"""
14
+
15
+ platform_name = "oyi"
16
+ BASE_URL = "https://api-10086.rcouyi.com"
17
+
18
+ def get_model_list(self) -> List[Tuple[str, str]]:
19
+ """Get model list"""
20
+ self.get_available_models()
21
+ return [(name, info["desc"]) for name, info in self.models.items()]
22
+
23
+ def __init__(self):
24
+ """Initialize model"""
25
+ super().__init__()
26
+ self.models = {}
27
+ self.messages = []
28
+ self.system_prompt = ""
29
+ self.conversation = None
30
+ self.first_chat = True
31
+
32
+ self.token = os.getenv("OYI_API_KEY")
33
+ if not self.token:
34
+ PrettyOutput.print("OYI_API_KEY 未设置", OutputType.WARNING)
35
+
36
+ self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-chat"
37
+ if self.model_name not in [m.split()[0] for m in self.get_available_models()]:
38
+ PrettyOutput.print(
39
+ f"警告: 选择的模型 {self.model_name} 不在可用列表中", OutputType.WARNING
40
+ )
41
+
42
+ def set_model_name(self, model_name: str):
43
+ """Set model name"""
44
+
45
+ self.model_name = model_name
46
+
47
+ def create_conversation(self) -> bool:
48
+ """Create a new conversation"""
49
+ try:
50
+ headers = {
51
+ "Authorization": f"Bearer {self.token}",
52
+ "Content-Type": "application/json",
53
+ "Accept": "application/json",
54
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
55
+ }
56
+
57
+ payload = {
58
+ "id": 0,
59
+ "roleId": 0,
60
+ "title": "New conversation",
61
+ "isLock": False,
62
+ "systemMessage": "",
63
+ "params": json.dumps(
64
+ {
65
+ "model": self.model_name,
66
+ "is_webSearch": True,
67
+ "message": [],
68
+ "systemMessage": None,
69
+ "requestMsgCount": 65536,
70
+ "temperature": 0.8,
71
+ "speechVoice": "Alloy",
72
+ "max_tokens": 8192,
73
+ "chatPluginIds": [],
74
+ }
75
+ ),
76
+ }
77
+
78
+ response = while_success(
79
+ lambda: http.post(
80
+ f"{self.BASE_URL}/chatapi/chat/save", headers=headers, json=payload
81
+ ),
82
+ sleep_time=5,
83
+ )
84
+
85
+ data = response.json()
86
+ if data["code"] == 200 and data["type"] == "success":
87
+ self.conversation = data
88
+ return True
89
+ else:
90
+ PrettyOutput.print(
91
+ f"创建会话失败: {data['message']}", OutputType.WARNING
92
+ )
93
+ return False
94
+
95
+ except Exception as e:
96
+ PrettyOutput.print(f"创建会话失败: {str(e)}", OutputType.ERROR)
97
+ return False
98
+
99
+ def set_system_prompt(self, message: str):
100
+ """Set system message"""
101
+ self.system_prompt = message
102
+
103
+ def chat(self, message: str) -> Generator[str, None, None]:
104
+ """Execute chat with the model
105
+
106
+ Args:
107
+ message: User input message
108
+
109
+ Returns:
110
+ str: Model response
111
+ """
112
+ try:
113
+ # 确保有会话ID
114
+ if not self.conversation:
115
+ if not self.create_conversation():
116
+ raise Exception("Failed to create conversation")
117
+
118
+ # 1. 发送消息
119
+ headers = {
120
+ "Authorization": f"Bearer {self.token}",
121
+ "Content-Type": "application/json",
122
+ "Accept": "application/json, text/plain, */*",
123
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
124
+ "Origin": "https://ai.rcouyi.com",
125
+ "Referer": "https://ai.rcouyi.com/",
126
+ }
127
+
128
+ payload = {
129
+ "topicId": (
130
+ self.conversation["result"]["id"] if self.conversation else None
131
+ ),
132
+ "messages": self.messages,
133
+ "content": message,
134
+ "contentFiles": [],
135
+ }
136
+
137
+ # 如果有上传的文件,添加到请求中
138
+ if self.first_chat:
139
+ message = self.system_prompt + "\n" + message
140
+ payload["content"] = message
141
+ self.first_chat = False
142
+
143
+ self.messages.append({"role": "user", "content": message})
144
+
145
+ # 发送消息
146
+ response = while_success(
147
+ lambda: http.post(
148
+ f"{self.BASE_URL}/chatapi/chat/message",
149
+ headers=headers,
150
+ json=payload,
151
+ ),
152
+ sleep_time=5,
153
+ )
154
+
155
+ data = response.json()
156
+ if data["code"] != 200 or data["type"] != "success":
157
+ error_msg = f"聊天失败: {data.get('message', '未知错误')}"
158
+ PrettyOutput.print(error_msg, OutputType.WARNING)
159
+ raise Exception(error_msg)
160
+
161
+ message_id = data["result"][-1]
162
+
163
+ # 获取响应内容
164
+ response = while_success(
165
+ lambda: http.stream_post(
166
+ f"{self.BASE_URL}/chatapi/chat/message/{message_id}",
167
+ headers=headers,
168
+ ),
169
+ sleep_time=5,
170
+ )
171
+
172
+ full_response = ""
173
+ bin = b""
174
+ for chunk in response:
175
+ if chunk:
176
+ bin += chunk
177
+ try:
178
+ text = bin.decode("utf-8")
179
+ except UnicodeDecodeError:
180
+ continue
181
+ full_response += text
182
+ bin = b""
183
+ yield text
184
+
185
+ self.messages.append({"role": "assistant", "content": full_response})
186
+ return None
187
+ except Exception as e:
188
+ PrettyOutput.print(f"聊天失败: {str(e)}", OutputType.ERROR)
189
+ raise e
190
+
191
+ def name(self) -> str:
192
+ """Return model name"""
193
+ return self.model_name
194
+
195
+ def delete_chat(self) -> bool:
196
+ """Delete current chat session"""
197
+ try:
198
+ if not self.conversation:
199
+ return True
200
+
201
+ headers = {
202
+ "Authorization": f"Bearer {self.token}",
203
+ "Content-Type": "application/json",
204
+ "Accept": "application/json, text/plain, */*",
205
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
206
+ "Origin": "https://ai.rcouyi.com",
207
+ "Referer": "https://ai.rcouyi.com/",
208
+ }
209
+
210
+ response = while_success(
211
+ lambda: http.post(
212
+ f"{self.BASE_URL}/chatapi/chat/{self.conversation['result']['id']}", # type: ignore
213
+ headers=headers,
214
+ json={},
215
+ ),
216
+ sleep_time=5,
217
+ )
218
+
219
+ data = response.json()
220
+ if data["code"] == 200 and data["type"] == "success":
221
+ self.messages = []
222
+ self.conversation = None
223
+ self.first_chat = True
224
+ return True
225
+ else:
226
+ error_msg = f"删除会话失败: {data.get('message', '未知错误')}"
227
+ PrettyOutput.print(error_msg, OutputType.WARNING)
228
+ return False
229
+
230
+ except Exception as e:
231
+ PrettyOutput.print(f"删除会话失败: {str(e)}", OutputType.ERROR)
232
+ return False
233
+
234
+ def get_available_models(self) -> List[str]:
235
+ """Get available model list
236
+
237
+ Returns:
238
+ List[str]: Available model name list
239
+ """
240
+ try:
241
+ if self.models:
242
+ return list(self.models.keys())
243
+
244
+ headers = {
245
+ "Content-Type": "application/json",
246
+ "Accept": "application/json, text/plain, */*",
247
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
248
+ "Origin": "https://ai.rcouyi.com",
249
+ "Referer": "https://ai.rcouyi.com/",
250
+ }
251
+
252
+ response = while_success(
253
+ lambda: http.get(
254
+ "https://ai.rcouyi.com/config/system.json", headers=headers
255
+ ),
256
+ sleep_time=5,
257
+ )
258
+
259
+ data = response.json()
260
+
261
+ # 保存模型信息
262
+ self.models = {
263
+ model["value"]: model
264
+ for model in data.get("model", [])
265
+ if model.get("enable", False) # 只保存启用的模型
266
+ }
267
+
268
+ # 格式化显示
269
+ models = []
270
+ for model in self.models.values():
271
+ # 基本信息
272
+ model_name = model["value"]
273
+ model_str = model["label"]
274
+
275
+ # 添加后缀标签
276
+ suffix = model.get("suffix", [])
277
+ if suffix:
278
+ # 处理新格式的suffix (字典列表)
279
+ if suffix and isinstance(suffix[0], dict):
280
+ suffix_str = ", ".join(s.get("tag", "") for s in suffix)
281
+ # 处理旧格式的suffix (字符串列表)
282
+ else:
283
+ suffix_str = ", ".join(str(s) for s in suffix)
284
+ model_str += f" ({suffix_str})"
285
+
286
+ # 添加描述或提示
287
+ info = model.get("tooltip") or model.get("description", "")
288
+ if info:
289
+ model_str += f" - {info}"
290
+
291
+ model["desc"] = model_str
292
+ models.append(model_name)
293
+
294
+ return sorted(models)
295
+
296
+ except Exception as e:
297
+ PrettyOutput.print(f"获取模型列表失败: {str(e)}", OutputType.WARNING)
298
+ return []
299
+
300
+ def support_upload_files(self) -> bool:
301
+ return False
302
+
303
+ def support_web(self) -> bool:
304
+ return False
305
+
306
+ def upload_files(self, file_list: List[str]) -> bool:
307
+ return False
jarvis/jarvis_platform_manager/main.py CHANGED
@@ -1,7 +1,7 @@
1
1
  # -*- coding: utf-8 -*-
2
- """Jarvis Platform Manager Main Module.
2
+ """Jarvis平台管理器主模块。
3
3
 
4
- This module provides the main entry point for the Jarvis platform manager.
4
+ 该模块提供了Jarvis平台管理器的主要入口点。
5
5
  """
6
6
  import argparse
7
7
  import os
@@ -15,7 +15,7 @@ from jarvis.jarvis_platform_manager.service import start_service
15
15
 
16
16
 
17
17
  def list_platforms() -> None:
18
- """List all supported platforms and models."""
18
+ """列出所有支持的平台和模型。"""
19
19
  registry = PlatformRegistry.get_global_platform_registry()
20
20
  platforms = registry.get_available_platforms()
21
21
 
@@ -53,7 +53,7 @@ def list_platforms() -> None:
53
53
 
54
54
 
55
55
  def chat_with_model(platform_name: str, model_name: str, system_prompt: str) -> None:
56
- """Chat with specified platform and model."""
56
+ """与指定平台和模型进行对话。"""
57
57
  registry = PlatformRegistry.get_global_platform_registry()
58
58
  conversation_history: List[Dict[str, str]] = [] # 存储对话记录
59
59
 
@@ -190,9 +190,13 @@ def chat_with_model(platform_name: str, model_name: str, system_prompt: str) ->
190
190
  for entry in conversation_history:
191
191
  file_obj.write(f"{entry['role']}: {entry['content']}\n\n")
192
192
 
193
- PrettyOutput.print(f"所有对话已保存到 {file_path}", OutputType.SUCCESS)
193
+ PrettyOutput.print(
194
+ f"所有对话已保存到 {file_path}", OutputType.SUCCESS
195
+ )
194
196
  except Exception as exc:
195
- PrettyOutput.print(f"保存所有对话失败: {str(exc)}", OutputType.ERROR)
197
+ PrettyOutput.print(
198
+ f"保存所有对话失败: {str(exc)}", OutputType.ERROR
199
+ )
196
200
  continue
197
201
 
198
202
  # Check if it is a shell command
@@ -242,13 +246,13 @@ def chat_with_model(platform_name: str, model_name: str, system_prompt: str) ->
242
246
 
243
247
 
244
248
  def validate_platform_model(args: argparse.Namespace) -> bool:
245
- """Validate platform and model arguments.
249
+ """验证平台和模型参数。
246
250
 
247
- Args:
248
- args: Command line arguments.
251
+ 参数:
252
+ args: 命令行参数。
249
253
 
250
- Returns:
251
- bool: True if platform and model are valid, False otherwise.
254
+ 返回:
255
+ bool: 如果平台和模型有效返回True,否则返回False
252
256
  """
253
257
  if not args.platform or not args.model:
254
258
  PrettyOutput.print(
@@ -260,10 +264,10 @@ def validate_platform_model(args: argparse.Namespace) -> bool:
260
264
 
261
265
 
262
266
  def chat_command(args: argparse.Namespace) -> None:
263
- """Process chat subcommand.
267
+ """处理聊天子命令。
264
268
 
265
- Args:
266
- args: Command line arguments.
269
+ 参数:
270
+ args: 命令行参数。
267
271
  """
268
272
  if not validate_platform_model(args):
269
273
  return
@@ -271,19 +275,19 @@ def chat_command(args: argparse.Namespace) -> None:
271
275
 
272
276
 
273
277
  def info_command(args: argparse.Namespace) -> None:
274
- """Process info subcommand.
278
+ """处理信息子命令。
275
279
 
276
- Args:
277
- args: Command line arguments.
280
+ 参数:
281
+ args: 命令行参数。
278
282
  """
279
283
  list_platforms()
280
284
 
281
285
 
282
286
  def service_command(args: argparse.Namespace) -> None:
283
- """Process service subcommand - start OpenAI-compatible API server.
287
+ """处理服务子命令 - 启动OpenAI兼容的API服务。
284
288
 
285
- Args:
286
- args: Command line arguments.
289
+ 参数:
290
+ args: 命令行参数。
287
291
  """
288
292
  start_service(
289
293
  host=args.host,
@@ -357,7 +361,7 @@ def role_command(args: argparse.Namespace) -> None:
357
361
 
358
362
 
359
363
  def main() -> None:
360
- """Main entry point for Jarvis platform manager."""
364
+ """Jarvis平台管理器的主入口点。"""
361
365
  init_env("欢迎使用 Jarvis-PlatformManager,您的平台管理助手已准备就绪!")
362
366
 
363
367
  parser = argparse.ArgumentParser(description="Jarvis AI 平台")
@@ -381,8 +385,12 @@ def main() -> None:
381
385
  service_parser.add_argument(
382
386
  "--port", type=int, default=8000, help="服务端口 (默认: 8000)"
383
387
  )
384
- service_parser.add_argument("--platform", "-p", help="指定默认平台,当客户端未指定平台时使用")
385
- service_parser.add_argument("--model", "-m", help="指定默认模型,当客户端未指定平台时使用")
388
+ service_parser.add_argument(
389
+ "--platform", "-p", help="指定默认平台,当客户端未指定平台时使用"
390
+ )
391
+ service_parser.add_argument(
392
+ "--model", "-m", help="指定默认模型,当客户端未指定平台时使用"
393
+ )
386
394
  service_parser.set_defaults(func=service_command)
387
395
 
388
396
  # role subcommand
jarvis/jarvis_utils/globals.py CHANGED
@@ -8,6 +8,9 @@
8
8
  - 环境初始化
9
9
  """
10
10
  import os
11
+
12
+ # 全局变量:保存最后一条消息
13
+ last_message: str = ""
11
14
  from typing import Any, Set
12
15
 
13
16
  import colorama
@@ -149,3 +152,31 @@ def get_interrupt() -> int:
149
152
  int: 当前中断计数
150
153
  """
151
154
  return g_interrupt
155
+
156
+
157
+ def set_last_message(message: str) -> None:
158
+ """
159
+ 设置最后一条消息。
160
+
161
+ 参数:
162
+ message: 要保存的消息
163
+ """
164
+ global last_message
165
+ last_message = message
166
+
167
+
168
+ def get_last_message() -> str:
169
+ """
170
+ 获取最后一条消息。
171
+
172
+ 返回:
173
+ str: 最后一条消息
174
+ """
175
+ return last_message
176
+ """
177
+ 获取当前中断信号状态。
178
+
179
+ 返回:
180
+ int: 当前中断计数
181
+ """
182
+ return g_interrupt
jarvis/jarvis_utils/input.py CHANGED
@@ -8,16 +8,20 @@
8
8
  - 带有模糊匹配的文件路径补全
9
9
  - 用于输入控制的自定义键绑定
10
10
  """
11
- from colorama import Fore # type: ignore
12
- from colorama import Style as ColoramaStyle # type: ignore
13
- from fuzzywuzzy import process # type: ignore
14
- from prompt_toolkit import PromptSession # type: ignore
15
- from prompt_toolkit.completion import (CompleteEvent, Completer, Completion, # type: ignore
16
- PathCompleter) # type: ignore
17
- from prompt_toolkit.document import Document # type: ignore
18
- from prompt_toolkit.formatted_text import FormattedText # type: ignore
19
- from prompt_toolkit.key_binding import KeyBindings # type: ignore
20
- from prompt_toolkit.styles import Style as PromptStyle # type: ignore
11
+ from colorama import Fore # type: ignore
12
+ from colorama import Style as ColoramaStyle # type: ignore
13
+ from fuzzywuzzy import process # type: ignore
14
+ from prompt_toolkit import PromptSession # type: ignore
15
+ from prompt_toolkit.completion import (
16
+ CompleteEvent,
17
+ Completer,
18
+ Completion, # type: ignore
19
+ PathCompleter,
20
+ ) # type: ignore
21
+ from prompt_toolkit.document import Document # type: ignore
22
+ from prompt_toolkit.formatted_text import FormattedText # type: ignore
23
+ from prompt_toolkit.key_binding import KeyBindings # type: ignore
24
+ from prompt_toolkit.styles import Style as PromptStyle # type: ignore
21
25
 
22
26
  from jarvis.jarvis_utils.config import get_replace_map
23
27
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
@@ -34,7 +38,7 @@ def get_single_line_input(tip: str) -> str:
34
38
  返回:
35
39
  str: 用户的输入
36
40
  """
37
- session = PromptSession(history=None)
41
+ session: PromptSession = PromptSession(history=None)
38
42
  style = PromptStyle.from_dict(
39
43
  {
40
44
  "prompt": "ansicyan",
@@ -186,7 +190,7 @@ def get_multiline_input(tip: str) -> str:
186
190
  """
187
191
  # 显示输入说明
188
192
  PrettyOutput.section(
189
- "用户输入 - 使用 @ 触发文件补全,Tab 选择补全项,Ctrl+J 提交,按 Ctrl+C 取消输入",
193
+ "用户输入 - 使用 @ 触发文件补全,Tab 选择补全项,Ctrl+J 提交,Ctrl+O 复制最后一条消息,按 Ctrl+C 取消输入",
190
194
  OutputType.USER,
191
195
  )
192
196
  print(f"{Fore.GREEN}{tip}{ColoramaStyle.RESET_ALL}")
@@ -208,6 +212,24 @@ def get_multiline_input(tip: str) -> str:
208
212
  """处理Ctrl+J以提交输入。"""
209
213
  event.current_buffer.validate_and_handle()
210
214
 
215
+ @bindings.add("c-o")
216
+ def _(event):
217
+ """处理Ctrl+O以复制最后一条消息到剪贴板。"""
218
+ from jarvis.jarvis_utils.globals import get_last_message
219
+ import subprocess
220
+
221
+ last_msg = get_last_message()
222
+ if last_msg:
223
+ try:
224
+ # 使用xsel将内容复制到剪贴板
225
+ process = subprocess.Popen(["xsel", "-b", "-i"], stdin=subprocess.PIPE)
226
+ process.communicate(input=last_msg.encode("utf-8"))
227
+ PrettyOutput.print("已将最后一条消息复制到剪贴板", OutputType.INFO)
228
+ except Exception as e:
229
+ PrettyOutput.print(f"复制到剪贴板失败: {e}", OutputType.ERROR)
230
+ else:
231
+ PrettyOutput.print("没有可复制的消息", OutputType.INFO)
232
+
211
233
  # 配置提示会话
212
234
  style = PromptStyle.from_dict(
213
235
  {
@@ -217,14 +239,14 @@ def get_multiline_input(tip: str) -> str:
217
239
  try:
218
240
  import os
219
241
 
220
- from prompt_toolkit.history import FileHistory # type: ignore
242
+ from prompt_toolkit.history import FileHistory # type: ignore
221
243
 
222
244
  from jarvis.jarvis_utils.config import get_data_dir
223
245
 
224
246
  # 获取数据目录路径
225
247
  history_dir = get_data_dir()
226
248
  # 初始化带历史记录的会话
227
- session = PromptSession(
249
+ session: PromptSession = PromptSession(
228
250
  history=FileHistory(os.path.join(history_dir, "multiline_input_history")),
229
251
  completer=FileCompleter(),
230
252
  key_bindings=bindings,
jarvis_ai_assistant-0.1.211.dist-info/METADATA → jarvis_ai_assistant-0.1.213.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: jarvis-ai-assistant
3
- Version: 0.1.211
3
+ Version: 0.1.213
4
4
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
5
5
  Home-page: https://github.com/skyfireitdiy/Jarvis
6
6
  Author: skyfire
@@ -54,8 +54,8 @@ Requires-Dist: pillow==10.2.0
54
54
  Requires-Dist: openai==1.78.1
55
55
  Requires-Dist: tabulate==0.9.0
56
56
  Requires-Dist: pyte==0.8.2
57
- Requires-Dist: pyyaml>=6.0.2
58
57
  Requires-Dist: httpx>=0.28.1
58
+ Requires-Dist: pyyaml>=5.3.1
59
59
  Provides-Extra: dev
60
60
  Requires-Dist: pytest; extra == "dev"
61
61
  Requires-Dist: black; extra == "dev"
jarvis_ai_assistant-0.1.211.dist-info/RECORD → jarvis_ai_assistant-0.1.213.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
1
- jarvis/__init__.py,sha256=cqFgIO8VkgWyzLkk14Tuu3hR1gSzDxeUVLp3NtZ1Uos,75
1
+ jarvis/__init__.py,sha256=-plSw2QooYexMaRlKIRHSdkQww5y9LDlUdoQ1gxFf2I,75
2
2
  jarvis/jarvis_agent/__init__.py,sha256=QbI5vkourPJZ2OR63RBZAtFptTYrZz_si8bIkc9EB2o,31709
3
3
  jarvis/jarvis_agent/builtin_input_handler.py,sha256=1V7kV5Zhw2HE3Xgjs1R-43RZ2huq3Kg-32NCdNnyZmA,2216
4
4
  jarvis/jarvis_agent/edit_file_handler.py,sha256=bIciBghx5maDz09x0XNTxdNsyrBbTND95GupVdJIVVg,16762
@@ -45,15 +45,17 @@ jarvis/jarvis_methodology/main.py,sha256=-PqsWvtpUJkkhiGgV-1JegEnEZBmv8SHnNMNNm_
45
45
  jarvis/jarvis_multi_agent/__init__.py,sha256=sDd3sK88dS7_qAz2ywIAaEWdQ4iRVCiuBu2rQQmrKbU,4512
46
46
  jarvis/jarvis_multi_agent/main.py,sha256=h7VUSwoPrES0XTK8z5kt3XLX1mmcm8UEuFEHQOUWPH4,1696
47
47
  jarvis/jarvis_platform/__init__.py,sha256=WLQHSiE87PPket2M50_hHzjdMIgPIBx2VF8JfB_NNRk,105
48
- jarvis/jarvis_platform/base.py,sha256=xN0DGYs03eS-wSQk4JgBOzFl0kvDAmqnssUU59EOXU8,7775
48
+ jarvis/jarvis_platform/ai8.py,sha256=UWbe6kveQvOO8wMM9mh5YWyB0zapUEeFiYVPBMnhBAE,8845
49
+ jarvis/jarvis_platform/base.py,sha256=CBFk1Kq7qzOwafOj22bacXChWvCnap3D4IacZCWC_Ss,7882
49
50
  jarvis/jarvis_platform/human.py,sha256=_WQtC5w6QJnHh-3KuW8T49C-HucXiHsBEVw-m51ykj4,3196
50
- jarvis/jarvis_platform/kimi.py,sha256=W5MKkH6rxS5JeNY3VZY0EOT9ugeZJqr_eHO9wd9cEW4,12444
51
+ jarvis/jarvis_platform/kimi.py,sha256=w0-OJ6xkOGPApcc2Jvc30BMjabwrnzcndmsJJsWOWJg,13419
51
52
  jarvis/jarvis_platform/openai.py,sha256=uEjBikfFj7kp5wondLvOx4WdkmTX0aqF6kixxAufcHg,4806
53
+ jarvis/jarvis_platform/oyi.py,sha256=U6klSMESC69H9xTo44PXD1ZvdnMa5d7qE3jcPmPBspY,10662
52
54
  jarvis/jarvis_platform/registry.py,sha256=Sz4ADAaxuufpAQG0KSQZuL1yALzH-aF3FDapkNn5foE,8107
53
55
  jarvis/jarvis_platform/tongyi.py,sha256=juvzMjZ2mbNzSWzem8snmFuE28YVOjjE_YdHCZa9Qnw,20698
54
56
  jarvis/jarvis_platform/yuanbao.py,sha256=ZsKXWifESXGfvB9eOot1I6TnhlmgXwnaft3e2UXgSXk,21045
55
57
  jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
56
- jarvis/jarvis_platform_manager/main.py,sha256=Jm1ijKGZrSYo1HrgJ1R4JQZwPwiOIfDFYSVJXKPklPU,15585
58
+ jarvis/jarvis_platform_manager/main.py,sha256=tIb3jUuMF0ErislPjo8TkEUqL04snfEJwMPSZiOkMmY,15659
57
59
  jarvis/jarvis_platform_manager/service.py,sha256=rY1FmNl-tmbkkke_3SlH9h6ckyPIgmSwbaRorURp9Cc,14916
58
60
  jarvis/jarvis_smart_shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
59
61
  jarvis/jarvis_smart_shell/main.py,sha256=DbhRSP1sZfSIaTltP1YWVDSQOTYEsbiOnfO9kSYwcNs,6959
@@ -79,16 +81,16 @@ jarvis/jarvis_utils/config.py,sha256=MO2-1z_7f3KkSrv7heGK1650Zb0SjnljO2hzLE2jA5c
79
81
  jarvis/jarvis_utils/embedding.py,sha256=oEOEM2qf16DMYwPsQe6srET9BknyjOdY2ef0jsp3Or8,2714
80
82
  jarvis/jarvis_utils/file_processors.py,sha256=XiM248SHS7lLgQDCbORVFWqinbVDUawYxWDOsLXDxP8,3043
81
83
  jarvis/jarvis_utils/git_utils.py,sha256=7AZblSD4b76vXxaDFkmZOy5rNkwvkwQQxGUy3NAusDQ,21641
82
- jarvis/jarvis_utils/globals.py,sha256=9NTMfCVd0jvtloOv14-KE6clhcVStFmyN9jWxLmQ5so,3369
84
+ jarvis/jarvis_utils/globals.py,sha256=WzZh_acNfHJj1LDulhyLQ7cojksBy0gdrITe0vH1XA0,3901
83
85
  jarvis/jarvis_utils/http.py,sha256=Uqt1kcz0HWnAfXHHi1fNGwLb2lcVUqpbrG2Uk_-kcIU,4882
84
- jarvis/jarvis_utils/input.py,sha256=ehvHkIgwqnBOHkwOeRCBFRggqOgOZuUdGQXn2ATUFwU,8049
86
+ jarvis/jarvis_utils/input.py,sha256=D0fQ6sRHjBaMm8s1L8HccC09Qlt_JD_SB_EHPCoztyA,8907
85
87
  jarvis/jarvis_utils/methodology.py,sha256=-cvM6pwgJK7BXCYg2uVjIId_j3v5RUh2z2PBcK_2vj4,8155
86
88
  jarvis/jarvis_utils/output.py,sha256=PRCgudPOB8gMEP3u-g0FGD2c6tBgJhLXUMqNPglfjV8,10813
87
89
  jarvis/jarvis_utils/tag.py,sha256=f211opbbbTcSyzCDwuIK_oCnKhXPNK-RknYyGzY1yD0,431
88
90
  jarvis/jarvis_utils/utils.py,sha256=BoRwLcixdf7mU3Tawe95ygGhQpkMffrFYLYhPwIvw8A,14498
89
- jarvis_ai_assistant-0.1.211.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
90
- jarvis_ai_assistant-0.1.211.dist-info/METADATA,sha256=DXSWr3mZ247PHP_3dZzWhbgM0QBJEo2uxK1YX6v9WoU,19564
91
- jarvis_ai_assistant-0.1.211.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
92
- jarvis_ai_assistant-0.1.211.dist-info/entry_points.txt,sha256=SF46ViTZcQVZEfbqzJDKKVc9TrN1x-P1mQ6wup7u2HY,875
93
- jarvis_ai_assistant-0.1.211.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
94
- jarvis_ai_assistant-0.1.211.dist-info/RECORD,,
91
+ jarvis_ai_assistant-0.1.213.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
92
+ jarvis_ai_assistant-0.1.213.dist-info/METADATA,sha256=ihUbPrOcnHuEjAtbY2-IA3Tj_ZOM3Hj_YDT5wJcsH6M,19564
93
+ jarvis_ai_assistant-0.1.213.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
94
+ jarvis_ai_assistant-0.1.213.dist-info/entry_points.txt,sha256=SF46ViTZcQVZEfbqzJDKKVc9TrN1x-P1mQ6wup7u2HY,875
95
+ jarvis_ai_assistant-0.1.213.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
96
+ jarvis_ai_assistant-0.1.213.dist-info/RECORD,,