jarvis-ai-assistant 0.1.184-py3-none-any.whl → 0.1.185-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
jarvis/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  """Jarvis AI Assistant"""
3
3
 
4
- __version__ = "0.1.184"
4
+ __version__ = "0.1.185"
@@ -4,6 +4,8 @@ import datetime
4
4
  import platform
5
5
  from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple, Union
6
6
 
7
+ from jarvis.jarvis_platform.base import BasePlatform
8
+
7
9
  # 第三方库导入
8
10
  from yaspin import yaspin # type: ignore
9
11
 
@@ -166,7 +168,9 @@ class Agent:
166
168
  multiline_inputer: Optional[Callable[[str], str]] = None,
167
169
  use_methodology: Optional[bool] = None,
168
170
  use_analysis: Optional[bool] = None,
171
+ files: List[str] = [],
169
172
  ):
173
+ self.files = files
170
174
  """初始化Jarvis Agent实例
171
175
 
172
176
  参数:
@@ -218,8 +222,9 @@ class Agent:
218
222
  multiline_inputer if multiline_inputer else get_multiline_input
219
223
  )
220
224
 
225
+ # 如果有上传文件,自动禁用方法论
221
226
  self.use_methodology = (
222
- use_methodology if use_methodology is not None else is_use_methodology()
227
+ False if files else (use_methodology if use_methodology is not None else is_use_methodology())
223
228
  )
224
229
  self.use_analysis = (
225
230
  use_analysis if use_analysis is not None else is_use_analysis()
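The two hunks above add a `files` parameter to `Agent.__init__` (note the mutable `[]` default) and make any uploaded file override the methodology setting. A minimal standalone sketch of the resulting precedence; the function name and the `config_default` parameter (standing in for `is_use_methodology()`) are illustrative, not part of the package API:

```python
from typing import List, Optional


def resolve_use_methodology(
    files: List[str],
    use_methodology: Optional[bool],
    config_default: bool,
) -> bool:
    """Mirror of the new precedence: uploaded files win, then the explicit
    argument, then the configured default."""
    if files:  # 如果有上传文件,自动禁用方法论
        return False
    if use_methodology is not None:
        return use_methodology
    return config_default


# Passing files disables methodology even when it was explicitly requested:
assert resolve_use_methodology(["README.md"], True, True) is False
```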
@@ -766,27 +771,31 @@ arguments:
766
771
  try:
767
772
  set_agent(self.name, self)
768
773
 
774
+ for handler in self.input_handler:
775
+ user_input, _ = handler(user_input, self)
776
+
769
777
  self.prompt = f"{user_input}"
770
778
 
771
- if self.first and self.use_methodology:
772
- # 先尝试上传方法轮
773
- platform = self.model if hasattr(self.model, "upload_files") else None
774
- if platform and upload_methodology(platform):
775
- self.prompt = f"{user_input}\n\n方法论已上传到平台,请参考平台上的方法论内容"
776
- else:
777
- msg = user_input
778
- for handler in self.input_handler:
779
- msg, _ = handler(msg, self)
780
- # 上传失败则回退到本地加载
781
- self.prompt = f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(msg, self.get_tool_registry())}"
779
+ if self.first:
780
+ # 如果有上传文件,先上传文件
781
+ if self.files and isinstance(self.model, BasePlatform) and hasattr(self.model, "upload_files"):
782
+ self.model.upload_files(self.files)
783
+ self.prompt = f"{user_input}\n\n已上传{len(self.files)}个文件到平台"
784
+
785
+ # 如果启用方法论且没有上传文件,上传方法论
786
+ elif self.use_methodology:
787
+ platform = self.model if hasattr(self.model, "upload_files") else None
788
+ if platform and upload_methodology(platform):
789
+ self.prompt = f"{user_input}\n\n方法论已上传到平台,请参考平台上的方法论内容"
790
+ else:
791
+ # 上传失败则回退到本地加载
792
+ self.prompt = f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(user_input, self.get_tool_registry())}"
782
793
 
783
794
  self.first = False
784
795
 
785
796
  self.conversation_length = get_context_token_count(self.prompt)
786
797
  while True:
787
798
  try:
788
- # 如果对话历史长度超过限制,在提示中添加提醒
789
-
790
799
  current_response = self._call_model(self.prompt, True)
791
800
  self.prompt = ""
792
801
  self.conversation_length += get_context_token_count(
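On the first turn, the rewritten block now runs the input handlers up front, then prefers uploading the user's files over uploading methodology, and falls back to locally loaded methodology only when no files are present. A simplified sketch of that branching; it skips the `isinstance`/`hasattr` checks and the actual calls to `upload_files`, `upload_methodology`, and `load_methodology`, and the helper name is illustrative:

```python
from typing import List


def first_turn_prompt(
    user_input: str,
    files: List[str],
    use_methodology: bool,
    methodology_uploaded: bool,
    local_methodology: str,
) -> str:
    """Illustrative mirror of the first-turn prompt selection."""
    if files:  # 如果有上传文件,先上传文件
        return f"{user_input}\n\n已上传{len(files)}个文件到平台"
    if use_methodology:
        if methodology_uploaded:
            return f"{user_input}\n\n方法论已上传到平台,请参考平台上的方法论内容"
        return (
            f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n"
            f"{local_methodology}"
        )
    return user_input
```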
@@ -95,7 +95,7 @@ def file_input_handler(user_input: str, agent: Any) -> Tuple[str, bool]:
95
95
  if files:
96
96
  with yaspin(text="正在读取文件...", color="cyan") as spinner:
97
97
  old_prompt = prompt
98
- result = FileOperationTool().execute({"operation":"read","files": files})
98
+ result = FileOperationTool().execute({"operation":"read","files": files, "agent": agent})
99
99
  if result["success"]:
100
100
  spinner.text = "文件读取完成"
101
101
  spinner.ok("✅")
@@ -1,22 +1,25 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  import json
3
3
  import os
4
- from typing import Any, Dict, Generator, List, Tuple
5
- import uuid
6
4
  import time
5
+ import uuid
6
+ from typing import Any, Dict, Generator, List, Tuple
7
7
 
8
8
  import requests
9
9
  from yaspin import yaspin
10
10
  from yaspin.spinners import Spinners
11
11
 
12
12
  from jarvis.jarvis_platform.base import BasePlatform
13
- from jarvis.jarvis_utils.output import PrettyOutput, OutputType
13
+ from jarvis.jarvis_utils.output import OutputType, PrettyOutput
14
14
  from jarvis.jarvis_utils.utils import while_success
15
15
 
16
16
 
17
17
  class TongyiPlatform(BasePlatform):
18
18
  """Tongyi platform implementation"""
19
19
 
20
+ # Supported image formats
21
+ IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp", ".tiff"}
22
+
20
23
  platform_name = "tongyi"
21
24
 
22
25
  def __init__(self):
@@ -31,7 +34,6 @@ class TongyiPlatform(BasePlatform):
31
34
  self.system_message = "" # System message for initialization
32
35
  self.first_chat = True # Flag for first chat
33
36
 
34
-
35
37
  def _get_base_headers(self):
36
38
  return {
37
39
  "Host": "api.tongyi.com",
@@ -51,12 +53,12 @@ class TongyiPlatform(BasePlatform):
51
53
  "Referer": "https://www.tongyi.com/qianwen",
52
54
  "Accept-Encoding": "gzip, deflate, br, zstd",
53
55
  "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
54
- "Cookie": self.cookies
56
+ "Cookie": self.cookies,
55
57
  }
56
58
 
57
59
  def set_model_name(self, model_name: str):
58
60
  """Set model name
59
-
61
+
60
62
  Args:
61
63
  model_name: Model name to use
62
64
  """
@@ -72,52 +74,63 @@ class TongyiPlatform(BasePlatform):
72
74
  headers = self._get_base_headers()
73
75
 
74
76
  headers["accept"] = "text/event-stream"
75
-
76
- # Prepare contents array with message
77
- contents = [{
78
- "content": message,
79
- "contentType": "text",
80
- "role": "user",
81
- "ext": {
82
- "searchType": "",
83
- "pptGenerate": False,
84
- "deepThink": False,
85
- "deepResearch": False
86
- }
87
- }]
88
77
 
89
- # Add system message if it's first chat
90
- if self.first_chat and self.system_message:
91
- contents.insert(0, {
92
- "content": self.system_message,
78
+ # Prepare contents array with message
79
+ contents = [
80
+ {
81
+ "content": message,
93
82
  "contentType": "text",
94
- "role": "system",
83
+ "role": "user",
95
84
  "ext": {
96
85
  "searchType": "",
97
86
  "pptGenerate": False,
98
87
  "deepThink": False,
99
- "deepResearch": False
100
- }
101
- })
88
+ "deepResearch": False,
89
+ },
90
+ }
91
+ ]
92
+
93
+ # Add system message if it's first chat
94
+ if self.first_chat and self.system_message:
95
+ contents.insert(
96
+ 0,
97
+ {
98
+ "content": self.system_message,
99
+ "contentType": "text",
100
+ "role": "system",
101
+ "ext": {
102
+ "searchType": "",
103
+ "pptGenerate": False,
104
+ "deepThink": False,
105
+ "deepResearch": False,
106
+ },
107
+ },
108
+ )
102
109
  self.first_chat = False
103
110
 
104
111
  # Add uploaded files to contents if available and clear after use
105
112
  if self.uploaded_file_info:
106
113
  for file_info in self.uploaded_file_info:
107
- contents.append({
108
- "role": "user",
109
- "contentType": "file",
110
- "content": file_info["url"],
111
- "ext": {
112
- "fileSize": file_info.get("fileSize", 0),
113
- "batchId": file_info.get("batchId", ""),
114
- "docId": file_info.get("docId", "")
114
+ # Determine content type based on fileKey extension
115
+ file_ext = os.path.splitext(file_info["fileKey"])[1].lower()
116
+ is_image = file_ext in self.IMAGE_EXTENSIONS
117
+
118
+ contents.append(
119
+ {
120
+ "role": "user",
121
+ "contentType": "image" if is_image else "file",
122
+ "content": file_info["url"],
123
+ "ext": {
124
+ "fileSize": file_info.get("fileSize", 0),
125
+ "batchId": file_info.get("batchId", ""),
126
+ "docId": file_info.get("docId", ""),
127
+ },
115
128
  }
116
- })
129
+ )
117
130
  # Clear uploaded file info after using it
118
131
  self.uploaded_file_info = []
119
132
 
120
- payload = {
133
+ payload: Dict[str, Any] = {
121
134
  "model": "",
122
135
  "action": "next",
123
136
  "mode": "chat",
@@ -135,13 +148,18 @@ class TongyiPlatform(BasePlatform):
135
148
  "specifiedModel": "",
136
149
  "deepThink": True if self.model_name == "Thinking" else False,
137
150
  "deepResearch": False,
138
- "fileUploadBatchId": self.uploaded_file_info[0]["batchId"] if self.uploaded_file_info else ""
151
+ "fileUploadBatchId": self.uploaded_file_info[0]["batchId"]
152
+ if self.uploaded_file_info
153
+ else "",
139
154
  },
140
- "contents": contents
155
+ "contents": contents,
141
156
  }
142
157
 
143
158
  try:
144
- response = while_success(lambda: requests.post(url, headers=headers, json=payload, stream=True), sleep_time=5)
159
+ response = while_success(
160
+ lambda: requests.post(url, headers=headers, json=payload, stream=True),
161
+ sleep_time=5,
162
+ )
145
163
  if response.status_code != 200:
146
164
  raise Exception(f"HTTP {response.status_code}: {response.text}")
147
165
  msg_id = ""
@@ -152,7 +170,7 @@ class TongyiPlatform(BasePlatform):
152
170
  for line in response.iter_lines():
153
171
  if not line:
154
172
  continue
155
- line_str = line.decode('utf-8')
173
+ line_str = line.decode("utf-8")
156
174
  if not line_str.startswith("data: "):
157
175
  continue
158
176
 
@@ -163,7 +181,7 @@ class TongyiPlatform(BasePlatform):
163
181
  msg_id = data["msgId"]
164
182
  if "sessionId" in data:
165
183
  session_id = data["sessionId"]
166
-
184
+
167
185
  if "contents" in data and len(data["contents"]) > 0:
168
186
  for content in data["contents"]:
169
187
  if content.get("contentType") == "think":
@@ -171,13 +189,17 @@ class TongyiPlatform(BasePlatform):
171
189
  yield "<think>\n\n"
172
190
  in_thinking = True
173
191
  if content.get("incremental"):
174
- tmp_content = json.loads(content.get("content"))["content"]
192
+ tmp_content = json.loads(content.get("content"))[
193
+ "content"
194
+ ]
175
195
  thinking_content += tmp_content
176
196
  yield tmp_content
177
197
  else:
178
- tmp_content = json.loads(content.get("content"))["content"]
198
+ tmp_content = json.loads(content.get("content"))[
199
+ "content"
200
+ ]
179
201
  if len(thinking_content) < len(tmp_content):
180
- yield tmp_content[len(thinking_content):]
202
+ yield tmp_content[len(thinking_content) :]
181
203
  thinking_content = tmp_content
182
204
  else:
183
205
  # thinking_content = "aaa</thi"
@@ -191,7 +213,9 @@ class TongyiPlatform(BasePlatform):
191
213
  # print("--------------------------------")
192
214
  # print(tmp_content)
193
215
  # print("--------------------------------")
194
- yield "\r\n</think>\n"[len(thinking_content)-len(tmp_content):]
216
+ yield "\r\n</think>\n"[
217
+ len(thinking_content) - len(tmp_content) :
218
+ ]
195
219
  thinking_content = tmp_content
196
220
  in_thinking = False
197
221
  elif content.get("contentType") == "text":
@@ -204,10 +228,9 @@ class TongyiPlatform(BasePlatform):
204
228
  else:
205
229
  tmp_content = content.get("content")
206
230
  if len(text_content) < len(tmp_content):
207
- yield tmp_content[len(text_content):]
231
+ yield tmp_content[len(text_content) :]
208
232
  text_content = tmp_content
209
233
 
210
-
211
234
  except json.JSONDecodeError:
212
235
  continue
213
236
 
@@ -218,10 +241,10 @@ class TongyiPlatform(BasePlatform):
218
241
 
219
242
  except Exception as e:
220
243
  raise Exception(f"Chat failed: {str(e)}")
221
-
244
+
222
245
  def _get_upload_token(self) -> Dict[str, Any]:
223
246
  """Get upload token from Tongyi API
224
-
247
+
225
248
  Returns:
226
249
  Dict[str, Any]: Upload token information including accessId, bucketName, etc.
227
250
  """
@@ -230,33 +253,34 @@ class TongyiPlatform(BasePlatform):
230
253
  payload = {}
231
254
 
232
255
  try:
233
- response = while_success(lambda: requests.post(url, headers=headers, json=payload), sleep_time=5)
256
+ response = while_success(
257
+ lambda: requests.post(url, headers=headers, json=payload), sleep_time=5
258
+ )
234
259
  if response.status_code != 200:
235
260
  raise Exception(f"HTTP {response.status_code}: {response.text}")
236
-
261
+
237
262
  result = response.json()
238
263
  if not result.get("success"):
239
264
  raise Exception(f"Failed to get upload token: {result.get('errorMsg')}")
240
-
265
+
241
266
  return result.get("data", {})
242
-
267
+
243
268
  except Exception as e:
244
269
  raise Exception(f"Failed to get upload token: {str(e)}")
245
-
246
270
 
247
271
  def upload_files(self, file_list: List[str]) -> bool:
248
272
  """Upload files to Tongyi platform and get download links
249
-
273
+
250
274
  Args:
251
275
  file_list: List of file paths to upload
252
-
276
+
253
277
  Returns:
254
278
  List[Dict[str, str]]: List of dictionaries containing file info and download URLs
255
279
  """
256
280
  try:
257
281
  upload_token = self._get_upload_token()
258
282
  uploaded_files = []
259
-
283
+
260
284
  for file_path in file_list:
261
285
  file_name = os.path.basename(file_path)
262
286
  with yaspin(Spinners.dots, text=f"上传文件 {file_name}") as spinner:
@@ -265,75 +289,85 @@ class TongyiPlatform(BasePlatform):
265
289
  spinner.text = f"文件不存在: {file_path}"
266
290
  spinner.fail("❌")
267
291
  return False
268
-
292
+
269
293
  # Get file name and content type
270
294
  content_type = self._get_content_type(file_path)
271
-
295
+
272
296
  spinner.text = f"准备上传文件: {file_name}"
273
-
297
+
274
298
  # Prepare form data
275
299
  form_data = {
276
- 'OSSAccessKeyId': upload_token['accessId'],
277
- 'policy': upload_token['policy'],
278
- 'signature': upload_token['signature'],
279
- 'key': f"{upload_token['dir']}{file_name}",
280
- 'dir': upload_token['dir'],
281
- 'success_action_status': '200'
300
+ "OSSAccessKeyId": upload_token["accessId"],
301
+ "policy": upload_token["policy"],
302
+ "signature": upload_token["signature"],
303
+ "key": f"{upload_token['dir']}{file_name}",
304
+ "dir": upload_token["dir"],
305
+ "success_action_status": "200",
282
306
  }
283
-
307
+
284
308
  # Prepare files
285
309
  files = {
286
- 'file': (file_name, open(file_path, 'rb'), content_type)
310
+ "file": (file_name, open(file_path, "rb"), content_type)
287
311
  }
288
-
312
+
289
313
  spinner.text = f"正在上传文件: {file_name}"
290
-
314
+
291
315
  # Upload file
292
316
  response = requests.post(
293
- upload_token['host'],
294
- data=form_data,
295
- files=files
317
+ upload_token["host"], data=form_data, files=files
296
318
  )
297
-
319
+
298
320
  if response.status_code != 200:
299
- spinner.text = f"上传失败 {file_name}: HTTP {response.status_code}"
321
+ spinner.text = (
322
+ f"上传失败 {file_name}: HTTP {response.status_code}"
323
+ )
300
324
  spinner.fail("❌")
301
325
  return False
302
-
303
- uploaded_files.append({
304
- 'fileKey': file_name,
305
- 'fileType': 'file',
306
- 'dir': upload_token['dir']
307
- })
326
+
327
+ # Determine file type based on extension
328
+ file_ext = os.path.splitext(file_path)[1].lower()
329
+ is_image = file_ext in self.IMAGE_EXTENSIONS
308
330
 
331
+ uploaded_files.append(
332
+ {
333
+ "fileKey": file_name,
334
+ "fileType": "image" if is_image else "file",
335
+ "dir": upload_token["dir"],
336
+ }
337
+ )
338
+
309
339
  spinner.text = f"获取下载链接: {file_name}"
310
-
340
+
311
341
  # Get download links for uploaded files
312
342
  url = "https://api.tongyi.com/dialog/downloadLink/batch"
313
343
  headers = self._get_base_headers()
314
344
  payload = {
315
- "fileKeys": [f['fileKey'] for f in uploaded_files],
316
- "fileType": "file",
317
- "dir": upload_token['dir']
345
+ "fileKeys": [f["fileKey"] for f in uploaded_files],
346
+ "fileType": "image" if any(f["fileType"] == "image" for f in uploaded_files) else "file",
347
+ "dir": upload_token["dir"],
318
348
  }
319
-
349
+
320
350
  response = requests.post(url, headers=headers, json=payload)
321
351
  if response.status_code != 200:
322
352
  spinner.text = f"获取下载链接失败: HTTP {response.status_code}"
323
353
  spinner.fail("❌")
324
354
  return False
325
-
355
+
326
356
  result = response.json()
327
357
  if not result.get("success"):
328
358
  spinner.text = f"获取下载链接失败: {result.get('errorMsg')}"
329
359
  spinner.fail("❌")
330
360
  return False
331
-
361
+
332
362
  # Add files to chat
333
- self.uploaded_file_info = result.get("data", {}).get("results", [])
363
+ self.uploaded_file_info = result.get("data", {}).get(
364
+ "results", []
365
+ )
334
366
  for file_info in self.uploaded_file_info:
335
367
  spinner.text = f"添加文件到对话: {file_name}"
336
- add_url = "https://api.tongyi.com/assistant/api/chat/file/add"
368
+ add_url = (
369
+ "https://api.tongyi.com/assistant/api/chat/file/add"
370
+ )
337
371
  add_payload = {
338
372
  "workSource": "chat",
339
373
  "terminal": "web",
@@ -345,70 +379,79 @@ class TongyiPlatform(BasePlatform):
345
379
  "workId": file_info["docId"],
346
380
  "workResourcePath": file_info["url"],
347
381
  "sessionId": "",
348
- "batchId": str(uuid.uuid4()).replace('-', '')[:32], # Generate random batchId
349
- "fileSize": os.path.getsize(file_path)
382
+ "batchId": str(uuid.uuid4()).replace("-", "")[
383
+ :32
384
+ ], # Generate random batchId
385
+ "fileSize": os.path.getsize(file_path),
350
386
  }
351
-
352
- add_response = requests.post(add_url, headers=headers, json=add_payload)
387
+
388
+ add_response = requests.post(
389
+ add_url, headers=headers, json=add_payload
390
+ )
353
391
  if add_response.status_code != 200:
354
- spinner.text = f"添加文件到对话失败: HTTP {add_response.status_code}"
392
+ spinner.text = (
393
+ f"添加文件到对话失败: HTTP {add_response.status_code}"
394
+ )
355
395
  spinner.fail("❌")
356
396
  continue
357
-
397
+
358
398
  add_result = add_response.json()
359
399
  if not add_result.get("success"):
360
- spinner.text = f"添加文件到对话失败: {add_result.get('errorMsg')}"
400
+ spinner.text = (
401
+ f"添加文件到对话失败: {add_result.get('errorMsg')}"
402
+ )
361
403
  spinner.fail("❌")
362
404
  continue
363
-
405
+
364
406
  file_info.update(add_result.get("data", {}))
365
-
407
+
366
408
  spinner.text = f"文件 {file_name} 上传成功"
367
409
  spinner.ok("✅")
368
410
  time.sleep(1) # 短暂暂停以便用户看到成功状态
369
-
411
+
370
412
  except Exception as e:
371
413
  spinner.text = f"上传文件 {file_name} 时出错: {str(e)}"
372
414
  spinner.fail("❌")
373
415
  return False
374
-
375
416
  return True
376
-
417
+
377
418
  except Exception as e:
378
419
  PrettyOutput.print(f"Error uploading files: {str(e)}", OutputType.ERROR)
379
420
  return False
380
-
421
+
381
422
  def _get_content_type(self, file_path: str) -> str:
382
423
  """Get content type for file
383
-
424
+
384
425
  Args:
385
426
  file_path: Path to file
386
-
427
+
387
428
  Returns:
388
429
  str: Content type
389
430
  """
390
431
  ext = os.path.splitext(file_path)[1].lower()
391
432
  content_types = {
392
- '.txt': 'text/plain',
393
- '.md': 'text/markdown',
394
- '.doc': 'application/msword',
395
- '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
396
- '.xls': 'application/vnd.ms-excel',
397
- '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
398
- '.pdf': 'application/pdf',
399
- '.png': 'image/png',
400
- '.jpg': 'image/jpeg',
401
- '.jpeg': 'image/jpeg',
402
- '.gif': 'image/gif',
403
- '.mp4': 'video/mp4',
404
- '.mp3': 'audio/mpeg',
405
- '.wav': 'audio/wav'
433
+ ".txt": "text/plain",
434
+ ".md": "text/markdown",
435
+ ".doc": "application/msword",
436
+ ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
437
+ ".xls": "application/vnd.ms-excel",
438
+ ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
439
+ ".pdf": "application/pdf",
440
+ ".epub": "application/epub+zip",
441
+ ".mobi": "application/x-mobipocket-ebook",
442
+ ".jpg": "image/jpeg",
443
+ ".jpeg": "image/jpeg",
444
+ ".png": "image/png",
445
+ ".gif": "image/gif",
446
+ ".webp": "image/webp",
447
+ ".bmp": "image/bmp",
448
+ ".tiff": "image/tiff",
406
449
  }
407
- return content_types.get(ext, 'application/octet-stream')
450
+ return content_types.get(ext, "application/octet-stream")
408
451
 
409
452
  def name(self) -> str:
410
453
  """Get platform name
411
-
454
+
412
455
  Returns:
413
456
  str: Platform name
414
457
  """
@@ -416,7 +459,7 @@ class TongyiPlatform(BasePlatform):
416
459
 
417
460
  def delete_chat(self) -> bool:
418
461
  """Delete chat history
419
-
462
+
420
463
  Returns:
421
464
  bool: True if deletion successful, False otherwise
422
465
  """
@@ -425,14 +468,17 @@ class TongyiPlatform(BasePlatform):
425
468
 
426
469
  url = "https://api.tongyi.com/dialog/session/delete"
427
470
  headers = self._get_base_headers()
428
- payload = {
429
- "sessionId": self.session_id
430
- }
471
+ payload: Dict[str, Any] = {"sessionId": self.session_id}
431
472
 
432
473
  try:
433
- response = while_success(lambda: requests.post(url, headers=headers, json=payload), sleep_time=5)
474
+ response = while_success(
475
+ lambda: requests.post(url, headers=headers, json=payload), sleep_time=5
476
+ )
434
477
  if response.status_code != 200:
435
- PrettyOutput.print(f"Failed to delete chat: HTTP {response.status_code}", OutputType.ERROR)
478
+ PrettyOutput.print(
479
+ f"Failed to delete chat: HTTP {response.status_code}",
480
+ OutputType.ERROR,
481
+ )
436
482
  return False
437
483
  self.request_id = ""
438
484
  self.session_id = ""
@@ -445,7 +491,7 @@ class TongyiPlatform(BasePlatform):
445
491
 
446
492
  def set_system_message(self, message: str):
447
493
  """Set system message
448
-
494
+
449
495
  Args:
450
496
  message: System message to set
451
497
  """
@@ -453,7 +499,7 @@ class TongyiPlatform(BasePlatform):
453
499
 
454
500
  def get_model_list(self) -> List[Tuple[str, str]]:
455
501
  """Get available model list
456
-
502
+
457
503
  Returns:
458
504
  List[Tuple[str, str]]: List of (model_id, model_name) tuples
459
505
  """
@@ -466,7 +512,7 @@ class TongyiPlatform(BasePlatform):
466
512
 
467
513
  def support_web(self) -> bool:
468
514
  """Check if platform supports web functionality
469
-
515
+
470
516
  Returns:
471
517
  bool: True if web is supported, False otherwise
472
518
  """
@@ -157,7 +157,7 @@ class FileSearchReplaceTool:
157
157
 
158
158
  if file_exists and agent:
159
159
  files = agent.get_user_data("files")
160
- if not files or files.get(file_path, None) is None:
160
+ if not files or file_path not in files:
161
161
  return {
162
162
  "success": False,
163
163
  "stdout": "",
@@ -169,6 +169,21 @@ class FileSearchReplaceTool:
169
169
  success, temp_content = fast_edit(file_path, changes, spinner)
170
170
  if not success:
171
171
  success, temp_content = slow_edit(file_path, yaml.safe_dump(changes, allow_unicode=True), spinner)
172
+ if not success:
173
+ spinner.text = f"文件 {file_path} 处理失败"
174
+ spinner.fail("❌")
175
+ return {
176
+ "success": False,
177
+ "stdout": "",
178
+ "stderr": temp_content
179
+ }
180
+ else:
181
+ spinner.text = f"文件 {file_path} 内容生成完成"
182
+ spinner.ok("✅")
183
+ else:
184
+ spinner.text = f"文件 {file_path} 内容生成完成"
185
+ spinner.ok("✅")
186
+
172
187
 
173
188
  # 只有当所有替换操作都成功时,才写回文件
174
189
  if success and (temp_content != original_content or not file_exists):
@@ -184,13 +199,6 @@ class FileSearchReplaceTool:
184
199
  stdout_message = f"文件 {file_path} {action} 完成"
185
200
  stdout_messages.append(stdout_message)
186
201
  PrettyOutput.print(stdout_message, OutputType.SUCCESS)
187
- elif success:
188
- stdout_message = f"文件 {file_path} 没有找到需要替换的内容"
189
- stdout_messages.append(stdout_message)
190
- PrettyOutput.print(stdout_message, OutputType.INFO)
191
- else:
192
- stdout_message = f"文件 {file_path} 修改失败"
193
- stdout_messages.append(stdout_message)
194
202
 
195
203
  except Exception as e:
196
204
  stderr_message = f"处理文件 {file_path} 时出错: {str(e)}"
@@ -344,7 +352,7 @@ def slow_edit(filepath: str, patch_content: str, spinner: Yaspin) -> Tuple[bool,
344
352
  if upload_success:
345
353
  response = model.chat_until_success(main_prompt)
346
354
  else:
347
- return False, ""
355
+ return False, "文件上传失败"
348
356
 
349
357
  # 解析差异化补丁
350
358
  diff_blocks = re.finditer(ot("DIFF")+r'\s*>{4,} SEARCH\n?(.*?)\n?={4,}\n?(.*?)\s*<{4,} REPLACE\n?'+ct("DIFF"),
@@ -357,17 +365,17 @@ def slow_edit(filepath: str, patch_content: str, spinner: Yaspin) -> Tuple[bool,
357
365
  "replace": match.group(2).strip()
358
366
  })
359
367
 
360
- success, modified_content = fast_edit(filepath, patches, spinner)
368
+ success, modified_content_or_err = fast_edit(filepath, patches, spinner)
361
369
  if success:
362
- return True, modified_content
370
+ return True, modified_content_or_err
363
371
  spinner.text = f"文件 {filepath} 修改失败"
364
372
  spinner.fail("❌")
365
- return False, ""
373
+ return False, f"文件修改失败: {modified_content_or_err}"
366
374
 
367
375
  except Exception as e:
368
376
  spinner.text = f"文件修改失败: {str(e)}"
369
377
  spinner.fail("❌")
370
- return False, ""
378
+ return False, f"文件修改失败: {str(e)}"
371
379
 
372
380
 
373
381
  def fast_edit(filepath: str, patches: List[Dict[str,str]], spinner: Yaspin) -> Tuple[bool, str]:
@@ -410,6 +418,7 @@ def fast_edit(filepath: str, patches: List[Dict[str,str]], spinner: Yaspin) -> T
410
418
  modified_content = file_content
411
419
  patch_count = 0
412
420
  success = True
421
+ err_msg = ""
413
422
  for patch in patches:
414
423
  search_text = patch["search"]
415
424
  replace_text = patch["replace"]
@@ -419,6 +428,7 @@ def fast_edit(filepath: str, patches: List[Dict[str,str]], spinner: Yaspin) -> T
419
428
  # 如果有多处,报错
420
429
  if modified_content.count(search_text) > 1:
421
430
  success = False
431
+ err_msg = f"搜索文本 {search_text} 在文件中存在多处,请检查补丁内容"
422
432
  break
423
433
  # 应用替换
424
434
  modified_content = modified_content.replace(
@@ -426,10 +436,11 @@ def fast_edit(filepath: str, patches: List[Dict[str,str]], spinner: Yaspin) -> T
426
436
  spinner.write(f"✅ 补丁 #{patch_count} 应用成功")
427
437
  else:
428
438
  success = False
439
+ err_msg = f"搜索文本 {search_text} 在文件中不存在,请检查补丁内容"
429
440
  break
430
441
  if not success:
431
442
  revert_file(filepath)
432
- return False, ""
443
+ return False, err_msg
433
444
 
434
445
 
435
446
  spinner.text = f"文件 {filepath} 修改完成,应用了 {patch_count} 个补丁"
@@ -100,6 +100,7 @@ arguments:
100
100
  - 假设工具结果
101
101
  - 创建虚构对话
102
102
  - 在没有所需信息的情况下继续
103
+ - yaml 格式错误
103
104
  </common_errors>
104
105
  </tool_system_guide>
105
106
  """
@@ -532,7 +533,7 @@ class ToolRegistry(OutputHandlerProtocol):
532
533
  else:
533
534
  return (
534
535
  {},
535
- f"""工具调用格式错误,请检查工具调用格式。
536
+ f"""工具调用格式错误,请检查工具调用格式(缺少name、arguments、want字段)。
536
537
 
537
538
  {tool_call_help}""",
538
539
  )
@@ -541,6 +542,8 @@ class ToolRegistry(OutputHandlerProtocol):
541
542
  {},
542
543
  f"""工具调用格式错误,请检查工具调用格式。
543
544
 
545
+ {e}
546
+
544
547
  {tool_call_help}""",
545
548
  )
546
549
  if len(ret) > 1:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: jarvis-ai-assistant
3
- Version: 0.1.184
3
+ Version: 0.1.185
4
4
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
5
5
  Home-page: https://github.com/skyfireitdiy/Jarvis
6
6
  Author: skyfire
@@ -85,7 +85,7 @@ Dynamic: requires-python
85
85
 
86
86
  ## 🌟 核心特色 <a id="core-features"></a>
87
87
 
88
- - 🆓 零成本接入:无缝集成元宝、Kimi等优质模型,无需支付API费用,同时保留强大的文件处理、搜索和推理能力
88
+ - 🆓 零成本接入:无缝集成腾讯元宝(推荐首选)、Kimi等优质模型,无需支付API费用,同时保留强大的文件处理、搜索和推理能力
89
89
  - 🛠️ 工具驱动:内置丰富工具集,涵盖脚本执行、代码开发、网页搜索、终端操作等核心功能
90
90
  - 👥 人机协作:支持实时交互,用户可随时介入指导,确保AI行为符合预期
91
91
  - 🔌 高度可扩展:支持自定义工具和平台,轻松集成MCP协议
@@ -117,9 +117,9 @@ pip3 install jarvis-ai-assistant
117
117
 
118
118
  将以下配置写入到`~/.jarvis/config.yaml`文件中。
119
119
 
120
- #### 腾讯元宝
120
+ #### 腾讯元宝 (推荐首选)
121
121
  ```yaml
122
- JARVIS_PLATFORM: yuanbao
122
+ JARVIS_PLATFORM: yuanbao # 推荐使用腾讯元宝平台,适配性最佳
123
123
  JARVIS_MODEL: deep_seek_v3
124
124
  JARVIS_THINKING_PLATFORM: yuanbao
125
125
  JARVIS_THINKING_MODEL: deep_seek
@@ -1,7 +1,7 @@
1
- jarvis/__init__.py,sha256=UVbYHDbtqKBl_4D6WpfOn7709w_VMH6nNObt9rvVuV0,74
2
- jarvis/jarvis_agent/__init__.py,sha256=AxT_2n-IQkbtoQlAS3SJ0tsvcUenWD7_Xrc-RZZCWiA,30352
1
+ jarvis/__init__.py,sha256=ctjj6eadc4jmCb3OWiljzQVxwTvbZ3urmkzwzSyoUYQ,74
2
+ jarvis/jarvis_agent/__init__.py,sha256=Mho0roePyuitHFjVCbK0_v9FPgEarcpvgIvY3sQKLws,30843
3
3
  jarvis/jarvis_agent/builtin_input_handler.py,sha256=f4DaEHPakXcAbgykFP-tiOQP6fh_yGFlZx_h91_j2tQ,1529
4
- jarvis/jarvis_agent/file_input_handler.py,sha256=LDNXoTtyjhyBmfzDnAdbWZ2BWdu4q-r6thSKRK8Iwjk,4187
4
+ jarvis/jarvis_agent/file_input_handler.py,sha256=7u8pXWD7F9mmiJkr9XO83mhFu40FSRoYQm55DbZHgQo,4203
5
5
  jarvis/jarvis_agent/jarvis.py,sha256=UkNMVUlSNKV6y3v12eAhqc_gIDB6Obxrwk5f7-sQeiQ,6137
6
6
  jarvis/jarvis_agent/main.py,sha256=GkjMTIbsd56nkVuRwD_tU_PZWyzixZZhMjVOCd0SzOA,2669
7
7
  jarvis/jarvis_agent/output_handler.py,sha256=7qori-RGrQmdiFepoEe3oPPKJIvRt90l_JDmvCoa4zA,1219
@@ -52,7 +52,7 @@ jarvis/jarvis_platform/human.py,sha256=xwaTZ1zdrAYZZFXxkbHvUdECwCGsic0kgAFUncUr4
52
52
  jarvis/jarvis_platform/kimi.py,sha256=b3EpnmHseZwrfCc8sMmvwLJ6Jg2FWf8ATItSDz5G3eQ,11978
53
53
  jarvis/jarvis_platform/openai.py,sha256=VyX3bR1rGxrJdWOtUBf8PgSL9n06KaNbOewL1urzOnk,4741
54
54
  jarvis/jarvis_platform/registry.py,sha256=3djxE8AB4gwrdAOvRSL0612Rt_CcsaZhzZ0_oXHu6xk,7820
55
- jarvis/jarvis_platform/tongyi.py,sha256=mgnNC9II3TpiNLD8VgqswqoEAFXoMg8ZG29L3f5kYOo,19711
55
+ jarvis/jarvis_platform/tongyi.py,sha256=NhE8ssvTI_XoDkXhaGtrUJG6q6-0OApWvh8d7wuFZXg,20944
56
56
  jarvis/jarvis_platform/yuanbao.py,sha256=FDi-D9Jnw_MiwI0skPNMYz874o6GhWhdNRdZg-ECoUA,20632
57
57
  jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
58
58
  jarvis/jarvis_platform_manager/main.py,sha256=OXWj18SqiV0Gl75YT6D9wspCCB4Nes04EY-ShI9kbpU,25677
@@ -65,7 +65,7 @@ jarvis/jarvis_tools/chdir.py,sha256=DNKVFrWqu6t_sZ2ipv99s6802QR4cSGlqKlmaI--arE,
65
65
  jarvis/jarvis_tools/code_plan.py,sha256=gWR0lzY62x2PxWKoMRBqW6jq7zQuO8vhpjC4TcHSYjk,7685
66
66
  jarvis/jarvis_tools/create_code_agent.py,sha256=-nHfo5O5pDIG5IX3w1ClQafGvGcdI2_w75-KGrD-gUQ,3458
67
67
  jarvis/jarvis_tools/create_sub_agent.py,sha256=lyFrrg4V0yXULmU3vldwGp_euZjwZzJcRU6mJ20zejY,3023
68
- jarvis/jarvis_tools/edit_file.py,sha256=gxnVijz-mOHpb9A7WTPIqCwmZHInSHwu_Psa_GvNWRQ,16724
68
+ jarvis/jarvis_tools/edit_file.py,sha256=czpJOY7Y7Q-SH3UiGfgfB1lRcqtzuIZjLBLZzozcl0A,17330
69
69
  jarvis/jarvis_tools/execute_script.py,sha256=IA1SkcnwBB9PKG2voBNx5N9GXL303OC7OOtdqRfqWOk,6428
70
70
  jarvis/jarvis_tools/file_analyzer.py,sha256=7ILHkUFm8pPZn1y_s4uT0kaWHP-EmlHnpkovDdA1yRE,4872
71
71
  jarvis/jarvis_tools/file_operation.py,sha256=WloC1-oPJLwgICu4WBc9f7XA8N_Ggl73QQ5CxM2XTlE,9464
@@ -73,7 +73,7 @@ jarvis/jarvis_tools/generate_new_tool.py,sha256=dLfOliIUm0ovLrHcZAhKm7lqhxwACv8m
73
73
  jarvis/jarvis_tools/methodology.py,sha256=m7cQmVhhQpUUl_uYTVvcW0JBovQLx5pWTXh_8K77HsU,5237
74
74
  jarvis/jarvis_tools/read_code.py,sha256=pL2SwZDsJbJMXo4stW96quFsLgbtPVIAW-h4sDKsLtM,6274
75
75
  jarvis/jarvis_tools/read_webpage.py,sha256=PFAYuKjay9j6phWzyuZ99ZfNaHJljmRWAgS0bsvbcvE,2219
76
- jarvis/jarvis_tools/registry.py,sha256=WvYPiaUrleFqeXvwRkxM-6TNs1sWm61mpg1MFVo_kas,25113
76
+ jarvis/jarvis_tools/registry.py,sha256=qdA0fgWeh_UG-_Rt5COnHTcKmOsYBiIU69uX4cLvfWI,25199
77
77
  jarvis/jarvis_tools/rewrite_file.py,sha256=3V2l7kG5DG9iRimBce-1qCRuJPL0QM32SBTzOl2zCqM,7004
78
78
  jarvis/jarvis_tools/search_web.py,sha256=rzxrCOTEo-MmLQrKI4k-AbfidUfJUeCPK4f5ZJy48G8,952
79
79
  jarvis/jarvis_tools/virtual_tty.py,sha256=8E_n-eC-RRPTqYx6BI5Q2RnorY8dbhKFBfAjIiRQROA,16397
@@ -91,9 +91,9 @@ jarvis/jarvis_utils/methodology.py,sha256=A8pE8ZqNHvGKaDO4TFtg7Oz-hAXPBcQfhmSPWM
91
91
  jarvis/jarvis_utils/output.py,sha256=QboL42GtG_dnvd1O64sl8o72mEBhXNRADPXQMXgDE7Q,9661
92
92
  jarvis/jarvis_utils/tag.py,sha256=YJHmuedLb7_AiqvKQetHr4R1FxyzIh7HN0RRkWMmYbU,429
93
93
  jarvis/jarvis_utils/utils.py,sha256=atSK-2cUr7_tOIFsQzJnuQxebi7aFN4jtmaoXEaV4jM,10692
94
- jarvis_ai_assistant-0.1.184.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
95
- jarvis_ai_assistant-0.1.184.dist-info/METADATA,sha256=1H6UQ0hW12J_lgy6Mhq3pOj7BrD1_1a_taBK9JE4oi0,15836
96
- jarvis_ai_assistant-0.1.184.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
97
- jarvis_ai_assistant-0.1.184.dist-info/entry_points.txt,sha256=Gy3DOP1PYLMK0GCj4rrP_9lkOyBQ39EK_lKGUSwn41E,869
98
- jarvis_ai_assistant-0.1.184.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
99
- jarvis_ai_assistant-0.1.184.dist-info/RECORD,,
94
+ jarvis_ai_assistant-0.1.185.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
95
+ jarvis_ai_assistant-0.1.185.dist-info/METADATA,sha256=_DBSrubK80hRPGRBLXCpPy7r7tgcEhMM_RDS82mvaFw,15923
96
+ jarvis_ai_assistant-0.1.185.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
97
+ jarvis_ai_assistant-0.1.185.dist-info/entry_points.txt,sha256=Gy3DOP1PYLMK0GCj4rrP_9lkOyBQ39EK_lKGUSwn41E,869
98
+ jarvis_ai_assistant-0.1.185.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
99
+ jarvis_ai_assistant-0.1.185.dist-info/RECORD,,