jarvis-ai-assistant 0.3.13__py3-none-any.whl → 0.3.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jarvis/__init__.py CHANGED
@@ -1,4 +1,4 @@
  # -*- coding: utf-8 -*-
  """Jarvis AI Assistant"""
 
- __version__ = "0.3.13"
+ __version__ = "0.3.15"
jarvis/jarvis_agent/__init__.py CHANGED
@@ -51,10 +51,12 @@ from jarvis.jarvis_utils.globals import (
  )
  from jarvis.jarvis_utils.input import get_multiline_input, user_confirm
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
- from jarvis.jarvis_utils.tag import ct, ot
+ from jarvis.jarvis_utils.tag import ot
 
 
- def show_agent_startup_stats(agent_name: str, model_name: str) -> None:
+ def show_agent_startup_stats(
+ agent_name: str, model_name: str, tool_registry_instance: Optional[Any] = None
+ ) -> None:
  """Output startup statistics
 
  Parameters:
@@ -87,8 +89,11 @@ def show_agent_startup_stats(agent_name: str, model_name: str) -> None:
  total_tool_count = len(tool_registry_all.tools)
 
  # Get the number of available tools (with filtering applied)
- tool_registry = ToolRegistry()
- available_tool_count = len(tool_registry.get_all_tools())
+ if tool_registry_instance is not None:
+ available_tool_count = len(tool_registry_instance.get_all_tools())
+ else:
+ tool_registry = ToolRegistry()
+ available_tool_count = len(tool_registry.get_all_tools())
 
  global_memory_dir = Path(get_data_dir()) / "memory" / "global_long_term"
  global_memory_count = 0
@@ -188,7 +193,12 @@ class Agent:
 
  def __del__(self):
  # Only stop recording if startup was recorded
- delete_agent(self.name)
+ try:
+ name = getattr(self, "name", None)
+ if name:
+ delete_agent(name)
+ except Exception:
+ pass
 
  def get_tool_usage_prompt(self) -> str:
  """Get the tool usage prompt"""
@@ -203,8 +213,8 @@ class Agent:
  model_group: Optional[str] = None,
  summary_prompt: Optional[str] = None,
  auto_complete: bool = False,
- output_handler: List[OutputHandlerProtocol] = [],
- use_tools: List[str] = [],
+ output_handler: Optional[List[OutputHandlerProtocol]] = None,
+ use_tools: Optional[List[str]] = None,
  input_handler: Optional[List[Callable[[str, Any], Tuple[str, bool]]]] = None,
  execute_tool_confirm: Optional[bool] = None,
  need_summary: bool = True,
@@ -212,7 +222,7 @@ class Agent:
  use_methodology: Optional[bool] = None,
  use_analysis: Optional[bool] = None,
  force_save_memory: Optional[bool] = None,
- files: List[str] = [],
+ files: Optional[List[str]] = None,
  ):
  """Initialize a Jarvis Agent instance
 
@@ -225,7 +235,6 @@ class Agent:
  auto_complete: whether to complete the task automatically
  output_handler: list of output handlers
  input_handler: list of input handlers
- max_context_length: maximum context length
  execute_tool_confirm: whether confirmation is required before executing a tool
  need_summary: whether a summary needs to be generated
  multiline_inputer: multi-line input handler
@@ -234,7 +243,7 @@ class Agent:
  force_save_memory: whether to force-save memories
  """
  # Initialize basic attributes
- self.files = files
+ self.files = files or []
  self.name = make_agent_name(name)
  self.description = description
  self.system_prompt = system_prompt
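
The switch away from list defaults (output_handler, use_tools, files) avoids Python's shared-mutable-default pitfall: a default list is created once at definition time and reused across calls. A short standalone sketch (illustrative only):

    # Illustrative sketch: mutable default vs. the None-plus-fallback pattern.
    from typing import List, Optional

    def risky(item: str, files: List[str] = []) -> List[str]:
        files.append(item)  # mutates the single list shared by every call
        return files

    def safe(item: str, files: Optional[List[str]] = None) -> List[str]:
        files = files or []  # fresh list per call, like self.files = files or []
        files.append(item)
        return files

    assert risky("a") == ["a"]
    assert risky("b") == ["a", "b"]  # state leaked from the previous call
    assert safe("a") == ["a"]
    assert safe("b") == ["b"]
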
@@ -250,7 +259,15 @@ class Agent:
  self._init_session()
 
  # Initialize handlers
- self._init_handlers(output_handler, input_handler, multiline_inputer, use_tools)
+ safe_output_handlers: List[OutputHandlerProtocol] = []
+ if output_handler:
+ safe_output_handlers = output_handler
+ safe_use_tools: List[str] = []
+ if use_tools:
+ safe_use_tools = use_tools
+ self._init_handlers(
+ safe_output_handlers, input_handler, multiline_inputer, safe_use_tools
+ )
 
  # Initialize configuration
  self._init_config(
@@ -271,7 +288,7 @@ class Agent:
  self._setup_system_prompt()
 
  # Output statistics (including the welcome message)
- show_agent_startup_stats(name, self.model.name()) # type: ignore
+ show_agent_startup_stats(name, self.model.name(), self.get_tool_registry()) # type: ignore
 
  def _init_model(self, llm_type: str, model_group: Optional[str]):
  """Initialize the model platform"""
@@ -528,7 +545,7 @@ class Agent:
 
  This method will:
  1. Prompt the user to save important memories
- 2. Call _generate_summary to generate a summary
+ 2. Call generate_summary to generate a summary
  3. Clear the conversation history
  4. Keep the system message
  5. Add the summary as new context
@@ -566,6 +583,8 @@ class Agent:
  # Clear history (but not the prompt, because the prompt is set in builtin_input_handler)
  if self.model:
  self.model.reset()
+ # Re-apply the system prompt after the reset so system constraints remain in effect
+ self._setup_system_prompt()
  # Reset the session
  self.session.clear_history()
 
jarvis/jarvis_code_agent/code_agent.py CHANGED
@@ -68,13 +68,15 @@ class CodeAgent:
  "retrieve_memory",
  "clear_memory",
  ]
-
+
  if append_tools:
- additional_tools = [tool.strip() for tool in append_tools.split(",")]
+ additional_tools = [
+ t for t in (tool.strip() for tool in append_tools.split(",")) if t
+ ]
  base_tools.extend(additional_tools)
  # Deduplicate
  base_tools = list(dict.fromkeys(base_tools))
-
+
  tool_registry.use_tools(base_tools)
  code_system_prompt = self._get_system_prompt()
  self.agent = Agent(
@@ -231,18 +233,21 @@ class CodeAgent:
  else:
  print("ℹ️ .jarvis is already in .gitignore")
 
- def _handle_git_changes(self) -> None:
+ def _handle_git_changes(self, prefix: str, suffix: str) -> None:
  """Handle uncommitted changes in the git repository"""
  print("🔄 Checking for uncommitted changes...")
  if has_uncommitted_changes():
  print("⏳ Uncommitted changes found, processing...")
  git_commiter = GitCommitTool()
- git_commiter.execute({})
+ git_commiter.execute({
+ "prefix": prefix,
+ "suffix": suffix
+ })
  print("✅ Uncommitted changes handled")
  else:
  print("✅ No uncommitted changes")
 
- def _init_env(self) -> None:
+ def _init_env(self, prefix: str, suffix: str) -> None:
  """Initialize the environment, combining the following steps:
  1. Find the git root directory
  2. Check and update the .gitignore file
@@ -252,7 +257,7 @@ class CodeAgent:
  print("🚀 Initializing environment...")
  git_dir = self._find_git_root()
  self._update_gitignore(git_dir)
- self._handle_git_changes()
+ self._handle_git_changes(prefix, suffix)
  # Configure git to be insensitive to line-ending changes
  self._configure_line_ending_settings()
  print("✅ Environment initialization complete")
@@ -412,18 +417,23 @@ class CodeAgent:
  return
 
  # Get the total number of commits on the current branch
- commit_result = subprocess.run(
- ["git", "rev-list", "--count", "HEAD"],
- capture_output=True,
- text=True,
- encoding="utf-8",
- errors="replace",
- check=True,
- )
- if commit_result.returncode != 0:
- return
-
- commit_count = int(commit_result.stdout.strip())
+ # Handle empty repositories or a missing HEAD: treat the commit count as 0 on failure and continue with the commit flow
+ commit_count = 0
+ try:
+ commit_result = subprocess.run(
+ ["git", "rev-list", "--count", "HEAD"],
+ capture_output=True,
+ text=True,
+ encoding="utf-8",
+ errors="replace",
+ check=False,
+ )
+ if commit_result.returncode == 0:
+ out = commit_result.stdout.strip()
+ if out.isdigit():
+ commit_count = int(out)
+ except Exception:
+ commit_count = 0
 
  # Stage all changes
  subprocess.run(["git", "add", "."], check=True)
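
The new fallback works because git rev-list --count HEAD fails in an empty repository with no HEAD; running it with check=False and validating the output lets the commit flow continue with a count of 0. A standalone sketch of the same pattern (illustrative only):

    # Illustrative sketch: count commits without raising on an empty repository.
    import subprocess

    def commit_count() -> int:
        try:
            result = subprocess.run(
                ["git", "rev-list", "--count", "HEAD"],
                capture_output=True,
                text=True,
                check=False,  # a non-zero exit (e.g. no HEAD yet) is not an error here
            )
        except Exception:
            return 0  # git missing or not runnable
        out = result.stdout.strip()
        return int(out) if result.returncode == 0 and out.isdigit() else 0

    print(commit_count())
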
@@ -466,7 +476,11 @@ class CodeAgent:
  return commits
 
  def _handle_commit_confirmation(
- self, commits: List[Tuple[str, str]], start_commit: Optional[str]
+ self,
+ commits: List[Tuple[str, str]],
+ start_commit: Optional[str],
+ prefix: str,
+ suffix: str,
  ) -> None:
  """Handle commit confirmation and a possible reset"""
  if commits and user_confirm("Accept the commit records above?", True):
@@ -482,7 +496,10 @@ class CodeAgent:
  check=True,
  )
  git_commiter = GitCommitTool()
- git_commiter.execute({})
+ git_commiter.execute({
+ "prefix": prefix,
+ "suffix": suffix
+ })
 
  # After the user accepts the commit, decide whether to save memories based on the configuration
  if self.agent.force_save_memory:
@@ -491,7 +508,7 @@ class CodeAgent:
  os.system(f"git reset --hard {str(start_commit)}") # make sure it is converted to a string
  PrettyOutput.print("Reset to the initial commit", OutputType.INFO)
 
- def run(self, user_input: str) -> Optional[str]:
+ def run(self, user_input: str, prefix: str = "", suffix: str = "") -> Optional[str]:
  """Run the code agent with the given user input.
 
  Args:
@@ -501,7 +518,7 @@ class CodeAgent:
  str: output describing the execution result; returns None on success
  """
  try:
- self._init_env()
+ self._init_env(prefix, suffix)
  start_commit = get_latest_commit_hash()
 
  # Get project statistics and append them to the user input
@@ -550,7 +567,7 @@ class CodeAgent:
  self._handle_uncommitted_changes()
  end_commit = get_latest_commit_hash()
  commits = self._show_commit_history(start_commit, end_commit)
- self._handle_commit_confirmation(commits, start_commit)
+ self._handle_commit_confirmation(commits, start_commit, prefix, suffix)
  return None
 
  except RuntimeError as e:
@@ -634,11 +651,12 @@ class CodeAgent:
  ):
  agent.session.prompt += final_ret
  return
- agent.session.prompt += final_ret
+ # The user did not confirm; allow a custom reply to be entered as an additional prompt
  custom_reply = get_multiline_input("Please enter a custom reply")
- if custom_reply.strip(): # If the custom reply is empty, return an empty string
+ if custom_reply.strip(): # If the custom reply is empty, do not set an additional prompt
  agent.set_addon_prompt(custom_reply)
  agent.session.prompt += final_ret
+ return
 
 
  @app.command()
@@ -663,6 +681,16 @@ def cli(
  "--restore-session",
  help="Restore session state from .jarvis/saved_session.json",
  ),
+ prefix: str = typer.Option(
+ "",
+ "--prefix",
+ help="Commit message prefix (separated by a space)",
+ ),
+ suffix: str = typer.Option(
+ "",
+ "--suffix",
+ help="Commit message suffix (separated by a newline)",
+ ),
  ) -> None:
  """Jarvis main entry point."""
  init_env("Welcome to Jarvis-CodeAgent, your code engineering assistant is ready!")
@@ -723,13 +751,13 @@ def cli(
  )
 
  if requirement:
- agent.run(requirement)
+ agent.run(requirement, prefix=prefix, suffix=suffix)
  else:
  while True:
  user_input = get_multiline_input("Please enter your requirement (enter an empty line to exit):")
  if not user_input:
  raise typer.Exit(code=0)
- agent.run(user_input)
+ agent.run(user_input, prefix=prefix, suffix=suffix)
 
  except typer.Exit:
  raise
jarvis/jarvis_mcp/streamable_mcp_client.py CHANGED
@@ -4,7 +4,7 @@ import threading
  from typing import Any, Callable, Dict, List
  from urllib.parse import urljoin
 
- import requests
+ import requests # type: ignore[import-untyped]
 
  from jarvis.jarvis_mcp import McpClient
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
@@ -25,6 +25,8 @@ class StreamableMcpClient(McpClient):
  self.base_url = config.get("base_url", "")
  if not self.base_url:
  raise ValueError("No base_url specified in config")
+ # Normalize base_url to ensure trailing slash for urljoin correctness
+ self.base_url = self.base_url.rstrip("/") + "/"
 
  # Set up the HTTP client
  self.session = requests.Session()
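
The trailing-slash normalization matters because urljoin drops the last path segment of a base URL that lacks a trailing slash. A quick standalone demonstration (illustrative only):

    # Illustrative sketch: urljoin only keeps the base path up to the last "/".
    from urllib.parse import urljoin

    print(urljoin("http://host/api/v1", "mcp"))   # http://host/api/mcp  ("v1" is dropped)
    print(urljoin("http://host/api/v1/", "mcp"))  # http://host/api/v1/mcp
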
@@ -43,6 +45,8 @@ class StreamableMcpClient(McpClient):
  # Add extra HTTP headers
  extra_headers = config.get("headers", {})
  self.session.headers.update(extra_headers)
+ # Request timeouts (connect, read) in seconds; can be overridden via config["timeout"]
+ self.timeout = config.get("timeout", (10, 300))
 
  # Request-related attributes
  self.pending_requests: Dict[str, threading.Event] = {} # stores requests waiting for a response {id: Event}
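
The (10, 300) default follows the requests convention of a (connect, read) timeout pair; without any timeout a stalled server can hang the client indefinitely. A standalone sketch (illustrative only; example.com is a placeholder URL):

    # Illustrative sketch: requests accepts a (connect, read) timeout tuple.
    import requests

    timeout = (10, 300)  # 10s to establish the connection, 300s between bytes received
    try:
        resp = requests.get("https://example.com", timeout=timeout)
        print(resp.status_code)
    except requests.Timeout:
        print("request timed out")
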
@@ -141,7 +145,9 @@ class StreamableMcpClient(McpClient):
 
  # Send the request to the Streamable HTTP endpoint
  mcp_url = urljoin(self.base_url, "mcp")
- response = self.session.post(mcp_url, json=request, stream=True) # enable streaming
+ response = self.session.post(
+ mcp_url, json=request, stream=True, timeout=self.timeout
+ ) # enable streaming
  response.raise_for_status()
 
  # Process the streaming response
@@ -149,17 +155,21 @@ class StreamableMcpClient(McpClient):
  for line in response.iter_lines(decode_unicode=True):
  if line:
  try:
- data = json.loads(line)
+ line_data = line
+ if isinstance(line_data, str) and line_data.startswith("data:"):
+ # Handle SSE-formatted lines like "data: {...}"
+ line_data = line_data.split(":", 1)[1].strip()
+ data = json.loads(line_data)
  if "id" in data and data["id"] == req_id:
  # This is the response to our request
  result = data
  break
  elif "method" in data:
  # This is a notification
- method = data.get("method", "")
+ notify_method = data.get("method", "")
  params = data.get("params", {})
- if method in self.notification_handlers:
- for handler in self.notification_handlers[method]:
+ if notify_method in self.notification_handlers:
+ for handler in self.notification_handlers[notify_method]:
  try:
  handler(params)
  except Exception as e:
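
The data: handling above covers servers that frame each JSON payload as a Server-Sent Events line; stripping the prefix before json.loads accepts both framings. A standalone sketch (illustrative only):

    # Illustrative sketch: accept both plain JSON lines and SSE "data: ..." lines.
    import json

    def parse_stream_line(line: str) -> dict:
        if line.startswith("data:"):
            line = line.split(":", 1)[1].strip()  # drop the SSE field name
        return json.loads(line)

    print(parse_stream_line('{"id": 1, "result": "ok"}'))
    print(parse_stream_line('data: {"id": 1, "result": "ok"}'))
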
@@ -171,6 +181,8 @@ class StreamableMcpClient(McpClient):
  PrettyOutput.print(f"Unable to parse response: {line}", OutputType.WARNING)
  continue
 
+ # Ensure response is closed after streaming
+ response.close()
  if result is None:
  raise RuntimeError(f"No response received: {method}")
 
@@ -198,8 +210,9 @@ class StreamableMcpClient(McpClient):
 
  # Send the notification to the Streamable HTTP endpoint
  mcp_url = urljoin(self.base_url, "mcp")
- response = self.session.post(mcp_url, json=notification)
+ response = self.session.post(mcp_url, json=notification, timeout=self.timeout)
  response.raise_for_status()
+ response.close()
 
  except Exception as e:
  PrettyOutput.print(f"Failed to send notification: {str(e)}", OutputType.ERROR)
jarvis/jarvis_platform_manager/service.py CHANGED
@@ -7,6 +7,7 @@ import asyncio
  import json
  import os
  import time
+ import threading
  import uuid
  from datetime import datetime
  from typing import Any, Dict, List, Optional, Union
@@ -72,8 +73,16 @@ def start_service(
  ) -> None:
  """Start OpenAI-compatible API server."""
  # Create logs directory if it doesn't exist
- logs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
- os.makedirs(logs_dir, exist_ok=True)
+ # Prefer environment variable, then user directory, fall back to CWD
+ logs_dir = os.environ.get("JARVIS_LOG_DIR")
+ if not logs_dir:
+ logs_dir = os.path.join(os.path.expanduser("~"), ".jarvis", "logs")
+ try:
+ os.makedirs(logs_dir, exist_ok=True)
+ except Exception:
+ # As a last resort, use current working directory
+ logs_dir = os.path.join(os.getcwd(), "logs")
+ os.makedirs(logs_dir, exist_ok=True)
 
  app = FastAPI(title="Jarvis API Server")
 
@@ -81,7 +90,7 @@ def start_service(
  app.add_middleware(
  CORSMiddleware,
  allow_origins=["*"],
- allow_credentials=True,
+ allow_credentials=False,
  allow_methods=["*"],
  allow_headers=["*"],
  )
@@ -228,23 +237,23 @@ def start_service(
  "messages": [{"role": m.role, "content": m.content} for m in messages],
  }
 
- # Log the conversation
- log_conversation(
- conversation_id,
- [{"role": m.role, "content": m.content} for m in messages],
- model,
- )
+ # Logging moved to post-response to avoid duplicates
 
  if stream:
  # Return streaming response
  return StreamingResponse(
  stream_chat_response(platform, message_text, model), # type: ignore
  media_type="text/event-stream",
+ headers={"Cache-Control": "no-cache", "Connection": "keep-alive"},
  )
 
  # Get chat response
  try:
- response_text = platform.chat_until_success(message_text)
+ # Run potentially blocking call in a thread to avoid blocking the event loop
+ loop = asyncio.get_running_loop()
+ response_text = await loop.run_in_executor(
+ None, lambda: platform.chat_until_success(message_text)
+ )
 
  # Create response in OpenAI format
  completion_id = f"chatcmpl-{str(uuid.uuid4())}"
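
Awaiting loop.run_in_executor moves the synchronous chat_until_success call onto a worker thread so the FastAPI event loop keeps serving other requests. A standalone sketch of the pattern (illustrative only; blocking_chat stands in for the platform call):

    # Illustrative sketch: run a blocking call on a worker thread from async code.
    import asyncio
    import time

    def blocking_chat(message: str) -> str:
        time.sleep(1)  # stands in for a synchronous network round trip
        return f"echo: {message}"

    async def handler(message: str) -> str:
        loop = asyncio.get_running_loop()
        # None = default ThreadPoolExecutor; the event loop stays responsive meanwhile
        return await loop.run_in_executor(None, lambda: blocking_chat(message))

    print(asyncio.run(handler("hi")))
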
@@ -287,11 +296,31 @@ def start_service(
  raise HTTPException(status_code=500, detail=str(exc))
 
  async def stream_chat_response(platform: Any, message: str, model_name: str) -> Any:
- """Stream chat response in OpenAI-compatible format."""
+ """Stream chat response in OpenAI-compatible format without blocking the event loop."""
  completion_id = f"chatcmpl-{str(uuid.uuid4())}"
  created_time = int(time.time())
  conversation_id = str(uuid.uuid4())
 
+ loop = asyncio.get_running_loop()
+ queue: asyncio.Queue = asyncio.Queue()
+ SENTINEL = object()
+
+ def producer() -> None:
+ try:
+ for chunk in platform.chat(message):
+ if chunk:
+ asyncio.run_coroutine_threadsafe(queue.put(chunk), loop)
+ except Exception as exc:
+ # Use a special dict to pass error across thread boundary
+ asyncio.run_coroutine_threadsafe(
+ queue.put({"__error__": str(exc)}), loop
+ )
+ finally:
+ asyncio.run_coroutine_threadsafe(queue.put(SENTINEL), loop)
+
+ # Start producer thread
+ threading.Thread(target=producer, daemon=True).start()
+
  # Send the initial chunk with the role
  initial_data = {
  "id": completion_id,
@@ -304,36 +333,20 @@ def start_service(
  }
  yield f"data: {json.dumps(initial_data)}\n\n"
 
- try:
- # Use the streaming-capable chat method
- response_generator = platform.chat(message)
-
- full_response = ""
- has_content = False
-
- # Iterate over the generator and stream chunks
- for chunk in response_generator:
- if chunk:
- has_content = True
- full_response += chunk
- chunk_data = {
- "id": completion_id,
- "object": "chat.completion.chunk",
- "created": created_time,
- "model": model_name,
- "choices": [
- {
- "index": 0,
- "delta": {"content": chunk},
- "finish_reason": None,
- }
- ],
- }
- yield f"data: {json.dumps(chunk_data)}\n\n"
+ full_response = ""
+ has_content = False
+
+ while True:
+ item = await queue.get()
+ if item is SENTINEL:
+ break
+
+ if isinstance(item, dict) and "__error__" in item:
+ error_msg = f"Error during streaming: {item['__error__']}"
+ PrettyOutput.print(error_msg, OutputType.ERROR)
 
- if not has_content:
- no_response_message = "No response from model."
- chunk_data = {
+ # Send error information in the stream
+ error_chunk = {
  "id": completion_id,
  "object": "chat.completion.chunk",
  "created": created_time,
@@ -341,41 +354,45 @@ def start_service(
  "choices": [
  {
  "index": 0,
- "delta": {"content": no_response_message},
- "finish_reason": None,
+ "delta": {"content": error_msg},
+ "finish_reason": "stop",
  }
  ],
  }
- yield f"data: {json.dumps(chunk_data)}\n\n"
- full_response = no_response_message
+ yield f"data: {json.dumps(error_chunk)}\n\n"
+ yield "data: [DONE]\n\n"
+
+ # Log the error
+ log_conversation(
+ conversation_id,
+ [{"role": "user", "content": message}],
+ model_name,
+ response=f"ERROR: {error_msg}",
+ )
+ return
 
- # Send the final chunk with finish_reason
- final_data = {
+ # Normal chunk
+ chunk = item
+ has_content = True
+ full_response += chunk
+ chunk_data = {
  "id": completion_id,
  "object": "chat.completion.chunk",
  "created": created_time,
  "model": model_name,
- "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
+ "choices": [
+ {
+ "index": 0,
+ "delta": {"content": chunk},
+ "finish_reason": None,
+ }
+ ],
  }
- yield f"data: {json.dumps(final_data)}\n\n"
+ yield f"data: {json.dumps(chunk_data)}\n\n"
 
- # Send the [DONE] marker
- yield "data: [DONE]\n\n"
-
- # Log the full conversation
- log_conversation(
- conversation_id,
- [{"role": "user", "content": message}],
- model_name,
- full_response,
- )
-
- except Exception as exc:
- error_msg = f"Error during streaming: {str(exc)}"
- PrettyOutput.print(error_msg, OutputType.ERROR)
-
- # Send error information in the stream
- error_chunk = {
+ if not has_content:
+ no_response_message = "No response from model."
+ chunk_data = {
  "id": completion_id,
  "object": "chat.completion.chunk",
  "created": created_time,
@@ -383,21 +400,34 @@ def start_service(
  "choices": [
  {
  "index": 0,
- "delta": {"content": error_msg},
- "finish_reason": "stop",
+ "delta": {"content": no_response_message},
+ "finish_reason": None,
  }
  ],
  }
- yield f"data: {json.dumps(error_chunk)}\n\n"
- yield "data: [DONE]\n\n"
+ yield f"data: {json.dumps(chunk_data)}\n\n"
+ full_response = no_response_message
 
- # Log the error
- log_conversation(
- conversation_id,
- [{"role": "user", "content": message}],
- model_name,
- response=f"ERROR: {error_msg}",
- )
+ # Send the final chunk with finish_reason
+ final_data = {
+ "id": completion_id,
+ "object": "chat.completion.chunk",
+ "created": created_time,
+ "model": model_name,
+ "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
+ }
+ yield f"data: {json.dumps(final_data)}\n\n"
+
+ # Send the [DONE] marker
+ yield "data: [DONE]\n\n"
+
+ # Log the full conversation
+ log_conversation(
+ conversation_id,
+ [{"role": "user", "content": message}],
+ model_name,
+ full_response,
+ )
 
  # Run the server
  uvicorn.run(app, host=host, port=port)
jarvis/jarvis_tools/registry.py CHANGED
@@ -79,7 +79,7 @@ want: the current git status; expecting to obtain the commit records of xxx
  name: execute_script
  arguments:
  interpreter: bash
- script_cotent: |2
+ script_content: |2
  git status --porcelain
  {ct("TOOL_CALL")}
  </string_format>
@@ -630,7 +630,9 @@ class ToolRegistry(OutputHandlerProtocol):
  content: the content that contains tool calls
 
  Returns:
- List[Dict]: a list of extracted tool calls containing names and arguments
+ Tuple[Dict[str, Dict[str, Any]], str]:
+ - The first element is the dict of extracted tool calls
+ - The second element is the error message string ("" on success)
 
  Raises:
  Exception: if a tool call is missing required fields
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: jarvis-ai-assistant
- Version: 0.3.13
+ Version: 0.3.15
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
  Home-page: https://github.com/skyfireitdiy/Jarvis
  Author: skyfire
@@ -1,5 +1,5 @@
- jarvis/__init__.py,sha256=sNG-zIDnzHZonw0jMokUfOeIX1O9_pvk1H7gGrXcEUI,74
- jarvis/jarvis_agent/__init__.py,sha256=vpioyz2E-F4hTK0A2isCbdeH6_90kfZ3FM-I5_mOnaM,31879
+ jarvis/__init__.py,sha256=pgHuAj3RKAHzddxR4-Pj5AIL9SywaQNWi0v2Ayu1tvQ,74
+ jarvis/jarvis_agent/__init__.py,sha256=CkFa66l5lM0-_zlzApwBxTYbrnbC4_NqdD4QuK3H1VQ,32614
  jarvis/jarvis_agent/agent_manager.py,sha256=YzpMiF0H2-eyk2kn2o24Bkj3bXsQx7Pv2vfD4gWepo0,2893
  jarvis/jarvis_agent/builtin_input_handler.py,sha256=wS-FqpT3pIXwHn1dfL3SpXonUKWgVThbQueUIeyRc2U,2917
  jarvis/jarvis_agent/config_editor.py,sha256=Ctk82sO6w2cNW0-_5L7Bomj-hgM4U7WwMc52fwhAJyg,1809
@@ -21,7 +21,7 @@ jarvis/jarvis_agent/task_manager.py,sha256=HJm4_SMpsFbQMUUsAZeHm7cZuhNbz28YW-DRL
  jarvis/jarvis_agent/tool_executor.py,sha256=k73cKhZEZpljvui4ZxALlFEIE-iLzJ32Softsmiwzqk,1896
  jarvis/jarvis_agent/tool_share_manager.py,sha256=R5ONIQlDXX9pFq3clwHFhEW8BAJ3ECaR2DqWCEC9tzM,5205
  jarvis/jarvis_code_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- jarvis/jarvis_code_agent/code_agent.py,sha256=KEku3DaunvjLVX7lIvTRsHT-DlN2K5bZT0ypfgYtLmA,29555
+ jarvis/jarvis_code_agent/code_agent.py,sha256=Sw3I_IrUjHCSe2UbgmbSEZUk4uM4EOThabJtS7OmulU,30637
  jarvis/jarvis_code_agent/lint.py,sha256=LZPsfyZPMo7Wm7LN4osZocuNJwZx1ojacO3MlF870x8,4009
  jarvis/jarvis_code_analysis/code_review.py,sha256=OLoMtXz7Kov6cVTdBoxq_OsX_j0rb7Rk3or5tKgiLpo,36023
  jarvis/jarvis_code_analysis/checklists/__init__.py,sha256=LIXAYa1sW3l7foP6kohLWnE98I_EQ0T7z5bYKHq6rJA,78
@@ -52,7 +52,7 @@ jarvis/jarvis_git_utils/git_commiter.py,sha256=GpSnVa72b9yWoJBbK1Qp_Kb4iimwVW6K7
  jarvis/jarvis_mcp/__init__.py,sha256=OPMtjD-uq9xAaKCRIDyKIosaFfBe1GBPu1az-mQ0rVM,2048
  jarvis/jarvis_mcp/sse_mcp_client.py,sha256=neKrgFxwLDPWjVrl9uDt1ricNwbLZbv1ZEFh0IkmqZk,22656
  jarvis/jarvis_mcp/stdio_mcp_client.py,sha256=APYUksYKlMx7AVNODKOLrTkKZPnp4kqTQIYIuNDDKko,11286
- jarvis/jarvis_mcp/streamable_mcp_client.py,sha256=sP0KEsxVcXGht0eA7a_m-ECtZAk39s4PL9OUdm35x2Y,14467
+ jarvis/jarvis_mcp/streamable_mcp_client.py,sha256=P5keAhI7SsVjAq3nU9J7pp2Tk4pJDxjdPAb6ZcVPLEc,15279
  jarvis/jarvis_memory_organizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jarvis/jarvis_memory_organizer/memory_organizer.py,sha256=4tf6Bs8u6Drj4repvuY3-XeH2Sb6ajVMFcW-rQEiGEY,26502
  jarvis/jarvis_methodology/main.py,sha256=6QF8hH3vB6rfxim0fPR34uVPf41zVpb4ZLqrFN2qONg,10983
@@ -69,7 +69,7 @@ jarvis/jarvis_platform/tongyi.py,sha256=KXEMfylTU91kHisXSaiz8dxzNXK_d7XD9vjuw4yX
  jarvis/jarvis_platform/yuanbao.py,sha256=32hjk1Ju1tqrMpF47JsSuaxej5K-gUPxjsDu9g0briY,23575
  jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  jarvis/jarvis_platform_manager/main.py,sha256=5k7D-tBsNjXPH07eO4f-0gwyUY7STGSNBSl1PbLq15A,20966
- jarvis/jarvis_platform_manager/service.py,sha256=myJYGSUclCEiRTf3JKs4JndwhXJeQj7MQQy4i13jMt0,13767
+ jarvis/jarvis_platform_manager/service.py,sha256=DnuRJjD7RvunGt3LpMfUDr-Bps-Nb--frkeaC0nwxj0,14874
  jarvis/jarvis_rag/__init__.py,sha256=HRTXgnQxDuaE9x-e3r6SYqhJ5d4DSI_rrIxy2IGY6qk,320
  jarvis/jarvis_rag/cache.py,sha256=Tqx_Oe-AhuWlMXHGHUaIuG6OEHoHBVZq7mL3kldtFFU,2723
  jarvis/jarvis_rag/cli.py,sha256=bIQKibp8swJDyfFBXaiX5C20LHN_2W2knO2I-MQp58c,15620
@@ -97,7 +97,7 @@ jarvis/jarvis_tools/generate_new_tool.py,sha256=uaWKlDMGjetvvwKTj0_AVTdmd14IktRb
  jarvis/jarvis_tools/methodology.py,sha256=_K4GIDUodGEma3SvNRo7Qs5rliijgNespVLyAPN35JU,5233
  jarvis/jarvis_tools/read_code.py,sha256=EnI-R-5HyIQYhMD391nZWXHIuHHBF-OJIRE0QpLcPX4,6417
  jarvis/jarvis_tools/read_webpage.py,sha256=NmDUboVZd4CGHBPRFK6dp3uqVhuGopW1bOi3TcaLDF4,2092
- jarvis/jarvis_tools/registry.py,sha256=TtZ415LUMfWqfcgn3G5V4e3QLLU2ILNRatkP10U0Ypw,31047
+ jarvis/jarvis_tools/registry.py,sha256=nOcj_WKkEEiR2CJ84REchKSaWFGjnUFD3EVwxtfVF74,31165
  jarvis/jarvis_tools/retrieve_memory.py,sha256=0UBZm4wQTXLTj5WHXR9fjsiIDQh-Z2UINVu8cJ12YYg,9488
  jarvis/jarvis_tools/rewrite_file.py,sha256=eG_WKg6cVAXmuGwUqlWkcuyay5S8DOzEi8vZCmX3O8w,7255
  jarvis/jarvis_tools/save_memory.py,sha256=DjeFb38OtK9Y_RpWYHz8vL72JdauXZTlc_Y0FUQBtiM,7486
@@ -119,9 +119,9 @@ jarvis/jarvis_utils/methodology.py,sha256=IIMU17WVSunsWXsnXROd4G77LxgYs4xEC_xm_0
  jarvis/jarvis_utils/output.py,sha256=QRLlKObQKT0KuRSeZRqYb7NlTQvsd1oZXZ41WxeWEuU,10894
  jarvis/jarvis_utils/tag.py,sha256=f211opbbbTcSyzCDwuIK_oCnKhXPNK-RknYyGzY1yD0,431
  jarvis/jarvis_utils/utils.py,sha256=LiVui9RMsbfUdzbvBBwbGNC4uniGnLp3LFsk7LXGrQE,47370
- jarvis_ai_assistant-0.3.13.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
- jarvis_ai_assistant-0.3.13.dist-info/METADATA,sha256=YJ01G5YB7gEX59rrdrPw6xb-0p_pdg-KT5318_yl3t4,18216
- jarvis_ai_assistant-0.3.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- jarvis_ai_assistant-0.3.13.dist-info/entry_points.txt,sha256=4GcWKFxRJD-QU14gw_3ZaW4KuEVxOcZK9i270rwPdjA,1395
- jarvis_ai_assistant-0.3.13.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
- jarvis_ai_assistant-0.3.13.dist-info/RECORD,,
+ jarvis_ai_assistant-0.3.15.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
+ jarvis_ai_assistant-0.3.15.dist-info/METADATA,sha256=oVWrBozetAj2ICvlfwVHLPfHqkVdLP2NgTUuWNJ7u14,18216
+ jarvis_ai_assistant-0.3.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ jarvis_ai_assistant-0.3.15.dist-info/entry_points.txt,sha256=4GcWKFxRJD-QU14gw_3ZaW4KuEVxOcZK9i270rwPdjA,1395
+ jarvis_ai_assistant-0.3.15.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
+ jarvis_ai_assistant-0.3.15.dist-info/RECORD,,