jarvis-ai-assistant 0.1.190__py3-none-any.whl → 0.1.191__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jarvis/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  """Jarvis AI Assistant"""
3
3
 
4
- __version__ = "0.1.190"
4
+ __version__ = "0.1.191"
@@ -19,8 +19,8 @@ from jarvis.jarvis_utils.config import (get_max_token_count,
19
19
  is_execute_tool_confirm,
20
20
  is_use_analysis, is_use_methodology)
21
21
  from jarvis.jarvis_utils.embedding import get_context_token_count
22
- from jarvis.jarvis_utils.globals import (delete_agent, make_agent_name,
23
- set_agent)
22
+ from jarvis.jarvis_utils.globals import (delete_agent, get_interrupt, make_agent_name,
23
+ set_agent, set_interrupt)
24
24
  from jarvis.jarvis_utils.input import get_multiline_input
25
25
  from jarvis.jarvis_utils.methodology import (load_methodology,
26
26
  upload_methodology)
@@ -319,7 +319,7 @@ class Agent:
319
319
  </actions>
320
320
  """
321
321
 
322
- self.model.set_system_message(
322
+ self.model.set_system_prompt(
323
323
  f"""
324
324
  {self.system_prompt}
325
325
 
@@ -371,40 +371,6 @@ class Agent:
371
371
  return handler
372
372
  return None
373
373
 
374
- def make_default_addon_prompt(self, need_complete: bool) -> str:
375
- """生成附加提示。
376
-
377
- 参数:
378
- need_complete: 是否需要完成任务
379
-
380
- """
381
- # 结构化系统指令
382
- action_handlers = ", ".join([handler.name() for handler in self.output_handler])
383
-
384
- # 任务完成提示
385
- complete_prompt = (
386
- f"- 输出{ot('!!!COMPLETE!!!')}"
387
- if need_complete and self.auto_complete
388
- else ""
389
- )
390
-
391
- addon_prompt = f"""
392
- <system_prompt>
393
- 请判断是否已经完成任务,如果已经完成:
394
- - 直接输出完成原因,不需要再有新的操作,不要输出{ot("TOOL_CALL")}标签
395
- {complete_prompt}
396
- 如果没有完成,请进行下一步操作:
397
- - 仅包含一个操作
398
- - 如果信息不明确,请请求用户补充
399
- - 如果执行过程中连续失败5次,请使用ask_user询问用户操作
400
- - 操作列表:{action_handlers}
401
- </system_prompt>
402
-
403
- 请继续。
404
- """
405
-
406
- return addon_prompt
407
-
408
374
  def _call_model(self, message: str, need_complete: bool = False) -> str:
409
375
  """调用AI模型并实现重试逻辑
410
376
 
@@ -429,8 +395,6 @@ class Agent:
429
395
  if self.addon_prompt:
430
396
  message += f"\n\n{self.addon_prompt}"
431
397
  self.addon_prompt = ""
432
- else:
433
- message += f"\n\n{self.make_default_addon_prompt(need_complete)}"
434
398
 
435
399
  # 累加对话长度
436
400
  self.conversation_length += get_context_token_count(message)
@@ -812,6 +776,14 @@ arguments:
812
776
  if self.after_tool_call_cb:
813
777
  self.after_tool_call_cb(self)
814
778
 
779
+ if get_interrupt():
780
+ set_interrupt(False)
781
+ user_input = self.multiline_inputer(
782
+ f"模型交互期间被中断,请输入用户干预信息:"
783
+ )
784
+ if user_input:
785
+ self.prompt += f"\n\n用户干预信息:{user_input}"
786
+
815
787
  if self.prompt or self.addon_prompt:
816
788
  continue
817
789
 
@@ -126,8 +126,6 @@ def main() -> None:
126
126
  need_summary=False
127
127
  )
128
128
 
129
- agent.set_addon_prompt(agent.make_default_addon_prompt(False) + "\n如果使用脚本可以完成任务,优先execute_script工具")
130
-
131
129
  # 优先处理命令行直接传入的任务
132
130
  if args.task:
133
131
  agent.run(args.task)
@@ -12,6 +12,7 @@ from yaspin import yaspin
12
12
  from jarvis.jarvis_utils.config import (get_max_input_token_count,
13
13
  get_pretty_output, is_print_prompt)
14
14
  from jarvis.jarvis_utils.embedding import split_text_into_chunks
15
+ from jarvis.jarvis_utils.globals import set_in_chat
15
16
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
16
17
  from jarvis.jarvis_utils.tag import ct, ot
17
18
  from jarvis.jarvis_utils.utils import (get_context_token_count,
@@ -128,10 +129,14 @@ class BasePlatform(ABC):
128
129
 
129
130
  def chat_until_success(self, message: str) -> str:
130
131
  """Chat with model until successful response"""
131
- if not self.suppress_output and is_print_prompt():
132
- PrettyOutput.print(f"{message}", OutputType.USER)
133
- result: str = while_true(lambda: while_success(lambda: self._chat(message), 5), 5)
134
- return result
132
+ try:
133
+ set_in_chat(True)
134
+ if not self.suppress_output and is_print_prompt():
135
+ PrettyOutput.print(f"{message}", OutputType.USER)
136
+ result: str = while_true(lambda: while_success(lambda: self._chat(message), 5), 5)
137
+ return result
138
+ finally:
139
+ set_in_chat(False)
135
140
 
136
141
  @abstractmethod
137
142
  def name(self) -> str:
@@ -144,9 +149,9 @@ class BasePlatform(ABC):
144
149
  raise NotImplementedError("delete_chat is not implemented")
145
150
 
146
151
  @abstractmethod
147
- def set_system_message(self, message: str):
152
+ def set_system_prompt(self, message: str):
148
153
  """Set system message"""
149
- raise NotImplementedError("set_system_message is not implemented")
154
+ raise NotImplementedError("set_system_prompt is not implemented")
150
155
 
151
156
  @abstractmethod
152
157
  def get_model_list(self) -> List[Tuple[str, str]]:
@@ -25,7 +25,7 @@ class HumanPlatform(BasePlatform):
25
25
  self.system_message = "" # 系统消息,用于初始化对话
26
26
  self.first_message = True
27
27
 
28
- def set_system_message(self, message: str):
28
+ def set_system_prompt(self, message: str):
29
29
  """设置系统消息"""
30
30
  self.system_message = message
31
31
 
@@ -41,7 +41,7 @@ class KimiModel(BasePlatform):
41
41
  self.system_message = ""
42
42
  self.model_name = "kimi"
43
43
 
44
- def set_system_message(self, message: str):
44
+ def set_system_prompt(self, message: str):
45
45
  """Set system message"""
46
46
  self.system_message = message
47
47
 
@@ -74,7 +74,7 @@ class OpenAIModel(BasePlatform):
74
74
 
75
75
  self.model_name = model_name
76
76
 
77
- def set_system_message(self, message: str):
77
+ def set_system_prompt(self, message: str):
78
78
  """
79
79
  设置系统消息(角色设定)
80
80
 
@@ -16,7 +16,7 @@ REQUIRED_METHODS = [
16
16
  ('chat', ['message']), # 方法名和参数列表
17
17
  ('name', []),
18
18
  ('delete_chat', []),
19
- ('set_system_message', ['message']),
19
+ ('set_system_prompt', ['message']),
20
20
  ('set_model_name', ['model_name']),
21
21
  ('get_model_list', []),
22
22
  ('upload_files', ['file_list']),
@@ -493,7 +493,7 @@ class TongyiPlatform(BasePlatform):
493
493
  PrettyOutput.print(f"Error deleting chat: {str(e)}", OutputType.ERROR)
494
494
  return False
495
495
 
496
- def set_system_message(self, message: str):
496
+ def set_system_prompt(self, message: str):
497
497
  """Set system message
498
498
 
499
499
  Args:
@@ -45,7 +45,7 @@ class YuanbaoPlatform(BasePlatform):
45
45
  self.model_name = "deep_seek_v3" # 默认模型名称,使用下划线保持一致
46
46
  self.multimedia = []
47
47
 
48
- def set_system_message(self, message: str):
48
+ def set_system_prompt(self, message: str):
49
49
  """设置系统消息"""
50
50
  self.system_message = message
51
51
 
@@ -48,7 +48,10 @@ def list_platforms():
48
48
  PrettyOutput.print(" • 没有可用的模型信息", OutputType.WARNING)
49
49
 
50
50
  except Exception as e:
51
- PrettyOutput.print(f"获取 {platform_name} 的模型列表失败: {str(e)}", OutputType.WARNING)
51
+ PrettyOutput.print(
52
+ f"获取 {platform_name} 的模型列表失败: {str(e)}", OutputType.WARNING
53
+ )
54
+
52
55
 
53
56
  def chat_with_model(platform_name: str, model_name: str):
54
57
  """Chat with specified platform and model"""
@@ -65,14 +68,21 @@ def chat_with_model(platform_name: str, model_name: str):
65
68
  # Set model
66
69
  platform.set_model_name(model_name)
67
70
  platform.set_suppress_output(False)
68
- PrettyOutput.print(f"连接到 {platform_name} 平台 {model_name} 模型", OutputType.SUCCESS)
69
- PrettyOutput.print("可用命令: /bye - 退出聊天, /clear - 清除会话, /upload - 上传文件, /shell - 执行shell命令, /save - 保存当前对话, /saveall - 保存所有对话", OutputType.INFO)
71
+ PrettyOutput.print(
72
+ f"连接到 {platform_name} 平台 {model_name} 模型", OutputType.SUCCESS
73
+ )
74
+ PrettyOutput.print(
75
+ "可用命令: /bye - 退出聊天, /clear - 清除会话, /upload - 上传文件, /shell - 执行shell命令, /save - 保存当前对话, /saveall - 保存所有对话",
76
+ OutputType.INFO,
77
+ )
70
78
 
71
79
  # Start conversation loop
72
80
  while True:
73
81
  # Get user input
74
82
  user_input = get_multiline_input("")
75
- conversation_history.append({"role": "user", "content": user_input}) # 记录用户输入
83
+ conversation_history.append(
84
+ {"role": "user", "content": user_input}
85
+ ) # 记录用户输入
76
86
 
77
87
  # Check if input is cancelled
78
88
  if user_input.strip() == "/bye":
@@ -99,17 +109,22 @@ def chat_with_model(platform_name: str, model_name: str):
99
109
  try:
100
110
  file_path = user_input.strip()[8:].strip()
101
111
  if not file_path:
102
- PrettyOutput.print("请指定要上传的文件路径,例如: /upload /path/to/file 或 /upload \"/path/with spaces/file\"", OutputType.WARNING)
112
+ PrettyOutput.print(
113
+ '请指定要上传的文件路径,例如: /upload /path/to/file 或 /upload "/path/with spaces/file"',
114
+ OutputType.WARNING,
115
+ )
103
116
  continue
104
-
117
+
105
118
  # Remove quotes if present
106
- if (file_path.startswith('"') and file_path.endswith('"')) or (file_path.startswith("'") and file_path.endswith("'")):
119
+ if (file_path.startswith('"') and file_path.endswith('"')) or (
120
+ file_path.startswith("'") and file_path.endswith("'")
121
+ ):
107
122
  file_path = file_path[1:-1]
108
123
 
109
124
  if not platform.support_upload_files():
110
125
  PrettyOutput.print("平台不支持上传文件", OutputType.ERROR)
111
126
  continue
112
-
127
+
113
128
  PrettyOutput.print(f"正在上传文件: {file_path}", OutputType.INFO)
114
129
  if platform.upload_files([file_path]):
115
130
  PrettyOutput.print("文件上传成功", OutputType.SUCCESS)
@@ -124,19 +139,26 @@ def chat_with_model(platform_name: str, model_name: str):
124
139
  try:
125
140
  file_path = user_input.strip()[5:].strip()
126
141
  if not file_path:
127
- PrettyOutput.print("请指定保存文件名,例如: /save last_message.txt", OutputType.WARNING)
142
+ PrettyOutput.print(
143
+ "请指定保存文件名,例如: /save last_message.txt",
144
+ OutputType.WARNING,
145
+ )
128
146
  continue
129
-
147
+
130
148
  # Remove quotes if present
131
- if (file_path.startswith('"') and file_path.endswith('"')) or (file_path.startswith("'") and file_path.endswith("'")):
149
+ if (file_path.startswith('"') and file_path.endswith('"')) or (
150
+ file_path.startswith("'") and file_path.endswith("'")
151
+ ):
132
152
  file_path = file_path[1:-1]
133
-
153
+
134
154
  # Write last message content to file
135
155
  if conversation_history:
136
- with open(file_path, 'w', encoding='utf-8') as f:
156
+ with open(file_path, "w", encoding="utf-8") as f:
137
157
  last_entry = conversation_history[-1]
138
158
  f.write(f"{last_entry['content']}\n")
139
- PrettyOutput.print(f"最后一条消息内容已保存到 {file_path}", OutputType.SUCCESS)
159
+ PrettyOutput.print(
160
+ f"最后一条消息内容已保存到 {file_path}", OutputType.SUCCESS
161
+ )
140
162
  else:
141
163
  PrettyOutput.print("没有可保存的消息", OutputType.WARNING)
142
164
  except Exception as e:
@@ -148,19 +170,26 @@ def chat_with_model(platform_name: str, model_name: str):
148
170
  try:
149
171
  file_path = user_input.strip()[8:].strip()
150
172
  if not file_path:
151
- PrettyOutput.print("请指定保存文件名,例如: /saveall all_conversations.txt", OutputType.WARNING)
173
+ PrettyOutput.print(
174
+ "请指定保存文件名,例如: /saveall all_conversations.txt",
175
+ OutputType.WARNING,
176
+ )
152
177
  continue
153
-
178
+
154
179
  # Remove quotes if present
155
- if (file_path.startswith('"') and file_path.endswith('"')) or (file_path.startswith("'") and file_path.endswith("'")):
180
+ if (file_path.startswith('"') and file_path.endswith('"')) or (
181
+ file_path.startswith("'") and file_path.endswith("'")
182
+ ):
156
183
  file_path = file_path[1:-1]
157
-
184
+
158
185
  # Write full conversation history to file
159
- with open(file_path, 'w', encoding='utf-8') as f:
186
+ with open(file_path, "w", encoding="utf-8") as f:
160
187
  for entry in conversation_history:
161
188
  f.write(f"{entry['role']}: {entry['content']}\n\n")
162
-
163
- PrettyOutput.print(f"所有对话已保存到 {file_path}", OutputType.SUCCESS)
189
+
190
+ PrettyOutput.print(
191
+ f"所有对话已保存到 {file_path}", OutputType.SUCCESS
192
+ )
164
193
  except Exception as e:
165
194
  PrettyOutput.print(f"保存所有对话失败: {str(e)}", OutputType.ERROR)
166
195
  continue
@@ -170,15 +199,20 @@ def chat_with_model(platform_name: str, model_name: str):
170
199
  try:
171
200
  command = user_input.strip()[6:].strip()
172
201
  if not command:
173
- PrettyOutput.print("请指定要执行的shell命令,例如: /shell ls -l", OutputType.WARNING)
202
+ PrettyOutput.print(
203
+ "请指定要执行的shell命令,例如: /shell ls -l",
204
+ OutputType.WARNING,
205
+ )
174
206
  continue
175
-
207
+
176
208
  PrettyOutput.print(f"执行命令: {command}", OutputType.INFO)
177
209
  return_code = os.system(command)
178
210
  if return_code == 0:
179
211
  PrettyOutput.print("命令执行完成", OutputType.SUCCESS)
180
212
  else:
181
- PrettyOutput.print(f"命令执行失败(返回码: {return_code})", OutputType.ERROR)
213
+ PrettyOutput.print(
214
+ f"命令执行失败(返回码: {return_code})", OutputType.ERROR
215
+ )
182
216
  except Exception as ex:
183
217
  PrettyOutput.print(f"执行命令失败: {str(ex)}", OutputType.ERROR)
184
218
  continue
@@ -189,7 +223,9 @@ def chat_with_model(platform_name: str, model_name: str):
189
223
  if not response:
190
224
  PrettyOutput.print("没有有效的回复", OutputType.WARNING)
191
225
  else:
192
- conversation_history.append({"role": "assistant", "content": response}) # 记录模型回复
226
+ conversation_history.append(
227
+ {"role": "assistant", "content": response}
228
+ ) # 记录模型回复
193
229
 
194
230
  except Exception as e:
195
231
  PrettyOutput.print(f"聊天失败: {str(e)}", OutputType.ERROR)
@@ -203,28 +239,36 @@ def chat_with_model(platform_name: str, model_name: str):
203
239
  except:
204
240
  pass
205
241
 
242
+
206
243
  # Helper function for platform and model validation
207
244
  def validate_platform_model(args):
208
245
  if not args.platform or not args.model:
209
- PrettyOutput.print("请指定平台和模型。使用 'jarvis info' 查看可用平台和模型。", OutputType.WARNING)
246
+ PrettyOutput.print(
247
+ "请指定平台和模型。使用 'jarvis info' 查看可用平台和模型。",
248
+ OutputType.WARNING,
249
+ )
210
250
  return False
211
251
  return True
212
252
 
253
+
213
254
  def chat_command(args):
214
255
  """Process chat subcommand"""
215
256
  if not validate_platform_model(args):
216
257
  return
217
258
  chat_with_model(args.platform, args.model)
218
259
 
260
+
219
261
  def info_command(args):
220
262
  """Process info subcommand"""
221
263
  list_platforms()
222
264
 
265
+
223
266
  # New models for OpenAI-compatible API
224
267
  class ChatMessage(BaseModel):
225
268
  role: str
226
269
  content: str
227
270
 
271
+
228
272
  class ChatCompletionRequest(BaseModel):
229
273
  model: str
230
274
  messages: List[ChatMessage]
@@ -232,11 +276,13 @@ class ChatCompletionRequest(BaseModel):
232
276
  temperature: Optional[float] = None
233
277
  max_tokens: Optional[int] = None
234
278
 
279
+
235
280
  class ChatCompletionChoice(BaseModel):
236
281
  index: int
237
282
  message: ChatMessage
238
283
  finish_reason: str = "stop"
239
284
 
285
+
240
286
  class ChatCompletionChunk(BaseModel):
241
287
  id: str
242
288
  object: str = "chat.completion.chunk"
@@ -244,13 +290,87 @@ class ChatCompletionChunk(BaseModel):
244
290
  model: str
245
291
  choices: List[Dict[str, Any]]
246
292
 
293
+
247
294
  class ChatCompletionResponse(BaseModel):
248
295
  id: str
249
296
  object: str = "chat.completion"
250
297
  created: int
251
298
  model: str
252
299
  choices: List[ChatCompletionChoice]
253
- usage: Dict[str, int] = Field(default_factory=lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0})
300
+ usage: Dict[str, int] = Field(
301
+ default_factory=lambda: {
302
+ "prompt_tokens": 0,
303
+ "completion_tokens": 0,
304
+ "total_tokens": 0,
305
+ }
306
+ )
307
+
308
+
309
+ def load_role_config(config_path: str) -> dict:
310
+ """从YAML文件加载角色配置
311
+
312
+ 参数:
313
+ config_path: YAML配置文件的路径
314
+
315
+ 返回:
316
+ dict: 角色配置字典
317
+ """
318
+ import yaml
319
+
320
+ if not os.path.exists(config_path):
321
+ PrettyOutput.print(f"角色配置文件 {config_path} 不存在", OutputType.ERROR)
322
+ return {}
323
+
324
+ with open(config_path, "r", encoding="utf-8", errors="ignore") as f:
325
+ try:
326
+ config = yaml.safe_load(f)
327
+ return config if config else {}
328
+ except yaml.YAMLError as e:
329
+ PrettyOutput.print(f"角色配置文件解析失败: {str(e)}", OutputType.ERROR)
330
+ return {}
331
+
332
+
333
+ def role_command(args):
334
+ """Process role subcommand - load role config and start chat"""
335
+ config = load_role_config(args.config)
336
+ if not config or "roles" not in config:
337
+ PrettyOutput.print("无效的角色配置文件", OutputType.ERROR)
338
+ return
339
+
340
+ # 显示可选角色列表
341
+ PrettyOutput.section("可用角色", OutputType.SUCCESS)
342
+ for i, role in enumerate(config["roles"], 1):
343
+ PrettyOutput.print(
344
+ f"{i}. {role['name']} - {role.get('description', '')}", OutputType.INFO
345
+ )
346
+
347
+ # 让用户选择角色
348
+ try:
349
+ choice = int(get_multiline_input("请选择角色(输入编号): "))
350
+ selected_role = config["roles"][choice - 1]
351
+ except (ValueError, IndexError):
352
+ PrettyOutput.print("无效的选择", OutputType.ERROR)
353
+ return
354
+
355
+ # 初始化平台和模型
356
+ platform_name = selected_role["platform"]
357
+ model_name = selected_role["model"]
358
+ system_prompt = selected_role.get("system_prompt", "")
359
+
360
+ registry = PlatformRegistry.get_global_platform_registry()
361
+ platform = registry.create_platform(platform_name)
362
+ if not platform:
363
+ PrettyOutput.print(f"创建平台 {platform_name} 失败", OutputType.WARNING)
364
+ return
365
+
366
+ platform.set_model_name(model_name)
367
+ if system_prompt:
368
+ platform.set_system_prompt(system_prompt)
369
+
370
+ # 开始对话
371
+ PrettyOutput.print(f"已选择角色: {selected_role['name']}", OutputType.SUCCESS)
372
+ chat_with_model(platform_name, model_name)
373
+
254
374
 
255
375
  def service_command(args):
256
376
  """Process service subcommand - start OpenAI-compatible API server"""
@@ -282,11 +402,16 @@ def service_command(args):
282
402
 
283
403
  registry = PlatformRegistry.get_global_platform_registry()
284
404
 
285
- PrettyOutput.print(f"Starting Jarvis API server on {host}:{port}", OutputType.SUCCESS)
405
+ PrettyOutput.print(
406
+ f"Starting Jarvis API server on {host}:{port}", OutputType.SUCCESS
407
+ )
286
408
  PrettyOutput.print("This server provides an OpenAI-compatible API", OutputType.INFO)
287
409
 
288
410
  if default_platform and default_model:
289
- PrettyOutput.print(f"Default platform: {default_platform}, model: {default_model}", OutputType.INFO)
411
+ PrettyOutput.print(
412
+ f"Default platform: {default_platform}, model: {default_model}",
413
+ OutputType.INFO,
414
+ )
290
415
 
291
416
  PrettyOutput.print("Available platforms:", OutputType.INFO)
292
417
 
@@ -306,7 +431,9 @@ def service_command(args):
306
431
  if key not in platform_instances:
307
432
  platform = registry.create_platform(platform_name)
308
433
  if not platform:
309
- raise HTTPException(status_code=400, detail=f"Platform {platform_name} not found")
434
+ raise HTTPException(
435
+ status_code=400, detail=f"Platform {platform_name} not found"
436
+ )
310
437
 
311
438
  platform.set_model_name(model_name)
312
439
  platform_instances[key] = platform
@@ -316,7 +443,9 @@ def service_command(args):
316
443
  def log_conversation(conversation_id, messages, model, response=None):
317
444
  """Log conversation to file in plain text format."""
318
445
  timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
319
- log_file = os.path.join(logs_dir, f"conversation_{conversation_id}_{timestamp}.txt")
446
+ log_file = os.path.join(
447
+ logs_dir, f"conversation_{conversation_id}_{timestamp}.txt"
448
+ )
320
449
 
321
450
  with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
322
451
  f.write(f"Conversation ID: {conversation_id}\n")
@@ -344,12 +473,14 @@ def service_command(args):
344
473
  if models:
345
474
  for model_name, _ in models:
346
475
  full_name = f"{default_platform}/{model_name}"
347
- model_list.append({
348
- "id": full_name,
349
- "object": "model",
350
- "created": int(time.time()),
351
- "owned_by": default_platform
352
- })
476
+ model_list.append(
477
+ {
478
+ "id": full_name,
479
+ "object": "model",
480
+ "created": int(time.time()),
481
+ "owned_by": default_platform,
482
+ }
483
+ )
353
484
  except Exception as e:
354
485
  print(f"Error getting models for {default_platform}: {str(e)}")
355
486
 
@@ -396,19 +527,21 @@ def service_command(args):
396
527
  # Store messages in chat history
397
528
  chat_histories[conversation_id] = {
398
529
  "model": model,
399
- "messages": [{"role": m.role, "content": m.content} for m in messages]
530
+ "messages": [{"role": m.role, "content": m.content} for m in messages],
400
531
  }
401
532
 
402
533
  # Log the conversation
403
- log_conversation(conversation_id,
404
- [{"role": m.role, "content": m.content} for m in messages],
405
- model)
534
+ log_conversation(
535
+ conversation_id,
536
+ [{"role": m.role, "content": m.content} for m in messages],
537
+ model,
538
+ )
406
539
 
407
540
  if stream:
408
541
  # Return streaming response
409
542
  return StreamingResponse(
410
543
  stream_chat_response(platform, message_text, model),
411
- media_type="text/event-stream"
544
+ media_type="text/event-stream",
412
545
  )
413
546
  else:
414
547
  # Get chat response
@@ -420,16 +553,17 @@ def service_command(args):
420
553
 
421
554
  # Update chat history with response
422
555
  if conversation_id in chat_histories:
423
- chat_histories[conversation_id]["messages"].append({
424
- "role": "assistant",
425
- "content": response_text
426
- })
556
+ chat_histories[conversation_id]["messages"].append(
557
+ {"role": "assistant", "content": response_text}
558
+ )
427
559
 
428
560
  # Log the conversation with response
429
- log_conversation(conversation_id,
430
- chat_histories[conversation_id]["messages"],
431
- model,
432
- response_text)
561
+ log_conversation(
562
+ conversation_id,
563
+ chat_histories[conversation_id]["messages"],
564
+ model,
565
+ response_text,
566
+ )
433
567
 
434
568
  return {
435
569
  "id": completion_id,
@@ -439,18 +573,16 @@ def service_command(args):
439
573
  "choices": [
440
574
  {
441
575
  "index": 0,
442
- "message": {
443
- "role": "assistant",
444
- "content": response_text
445
- },
446
- "finish_reason": "stop"
576
+ "message": {"role": "assistant", "content": response_text},
577
+ "finish_reason": "stop",
447
578
  }
448
579
  ],
449
580
  "usage": {
450
581
  "prompt_tokens": len(message_text) // 4, # Rough estimate
451
582
  "completion_tokens": len(response_text) // 4, # Rough estimate
452
- "total_tokens": (len(message_text) + len(response_text)) // 4 # Rough estimate
453
- }
583
+ "total_tokens": (len(message_text) + len(response_text))
584
+ // 4, # Rough estimate
585
+ },
454
586
  }
455
587
  except Exception as e:
456
588
  raise HTTPException(status_code=500, detail=str(e))
@@ -473,15 +605,13 @@ def service_command(args):
473
605
 
474
606
  # 修改第一个yield语句的格式
475
607
  initial_data = {
476
- 'id': completion_id,
477
- 'object': 'chat.completion.chunk',
478
- 'created': created_time,
479
- 'model': model_name,
480
- 'choices': [{
481
- 'index': 0,
482
- 'delta': {'role': 'assistant'},
483
- 'finish_reason': None
484
- }]
608
+ "id": completion_id,
609
+ "object": "chat.completion.chunk",
610
+ "created": created_time,
611
+ "model": model_name,
612
+ "choices": [
613
+ {"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}
614
+ ],
485
615
  }
486
616
  res = json.dumps(initial_data)
487
617
  yield f"data: {res}\n\n"
@@ -498,20 +628,22 @@ def service_command(args):
498
628
  # 分成小块以获得更好的流式体验
499
629
  chunk_size = 4 # 每个块的字符数
500
630
  for i in range(0, len(response), chunk_size):
501
- chunk = response[i:i+chunk_size]
631
+ chunk = response[i : i + chunk_size]
502
632
  full_response += chunk
503
633
 
504
634
  # 创建并发送块
505
635
  chunk_data = {
506
- 'id': completion_id,
507
- 'object': 'chat.completion.chunk',
508
- 'created': created_time,
509
- 'model': model_name,
510
- 'choices': [{
511
- 'index': 0,
512
- 'delta': {'content': chunk},
513
- 'finish_reason': None
514
- }]
636
+ "id": completion_id,
637
+ "object": "chat.completion.chunk",
638
+ "created": created_time,
639
+ "model": model_name,
640
+ "choices": [
641
+ {
642
+ "index": 0,
643
+ "delta": {"content": chunk},
644
+ "finish_reason": None,
645
+ }
646
+ ],
515
647
  }
516
648
 
517
649
  yield f"data: {json.dumps(chunk_data)}\n\n"
@@ -521,30 +653,28 @@ def service_command(args):
521
653
  else:
522
654
  # 如果没有输出,发送一个空内容块
523
655
  chunk_data = {
524
- 'id': completion_id,
525
- 'object': 'chat.completion.chunk',
526
- 'created': created_time,
527
- 'model': model_name,
528
- 'choices': [{
529
- 'index': 0,
530
- 'delta': {'content': "No response from model."},
531
- 'finish_reason': None
532
- }]
656
+ "id": completion_id,
657
+ "object": "chat.completion.chunk",
658
+ "created": created_time,
659
+ "model": model_name,
660
+ "choices": [
661
+ {
662
+ "index": 0,
663
+ "delta": {"content": "No response from model."},
664
+ "finish_reason": None,
665
+ }
666
+ ],
533
667
  }
534
668
  yield f"data: {json.dumps(chunk_data)}\n\n"
535
669
  full_response = "No response from model."
536
670
 
537
671
  # 修改最终yield语句的格式
538
672
  final_data = {
539
- 'id': completion_id,
540
- 'object': 'chat.completion.chunk',
541
- 'created': created_time,
542
- 'model': model_name,
543
- 'choices': [{
544
- 'index': 0,
545
- 'delta': {},
546
- 'finish_reason': 'stop'
547
- }]
673
+ "id": completion_id,
674
+ "object": "chat.completion.chunk",
675
+ "created": created_time,
676
+ "model": model_name,
677
+ "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
548
678
  }
549
679
  yield f"data: {json.dumps(final_data)}\n\n"
550
680
 
@@ -553,51 +683,61 @@ def service_command(args):
553
683
 
554
684
  # 记录对话到文件
555
685
  timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
556
- log_file = os.path.join(logs_dir, f"stream_conversation_{conversation_id}_{timestamp}.json")
686
+ log_file = os.path.join(
687
+ logs_dir, f"stream_conversation_{conversation_id}_{timestamp}.json"
688
+ )
557
689
 
558
690
  log_data = {
559
691
  "conversation_id": conversation_id,
560
692
  "timestamp": timestamp,
561
693
  "model": model_name,
562
694
  "message": message,
563
- "response": full_response
695
+ "response": full_response,
564
696
  }
565
697
 
566
698
  with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
567
699
  json.dump(log_data, f, ensure_ascii=False, indent=2)
568
700
 
569
- PrettyOutput.print(f"Stream conversation logged to {log_file}", OutputType.INFO)
701
+ PrettyOutput.print(
702
+ f"Stream conversation logged to {log_file}", OutputType.INFO
703
+ )
570
704
 
571
705
  except Exception as e:
572
706
  # 发送错误消息
573
707
  error_msg = f"Error: {str(e)}"
574
708
  print(f"Streaming error: {error_msg}")
575
709
 
576
- res = json.dumps({
577
- 'id': completion_id,
578
- 'object': 'chat.completion.chunk',
579
- 'created': created_time,
580
- 'model': model_name,
581
- 'choices': [{
582
- 'index': 0,
583
- 'delta': {'content': error_msg},
584
- 'finish_reason': 'stop'
585
- }]
586
- })
710
+ res = json.dumps(
711
+ {
712
+ "id": completion_id,
713
+ "object": "chat.completion.chunk",
714
+ "created": created_time,
715
+ "model": model_name,
716
+ "choices": [
717
+ {
718
+ "index": 0,
719
+ "delta": {"content": error_msg},
720
+ "finish_reason": "stop",
721
+ }
722
+ ],
723
+ }
724
+ )
587
725
  yield f"data: {res}\n\n"
588
726
  yield f"data: {json.dumps({'error': {'message': error_msg, 'type': 'server_error'}})}\n\n"
589
727
  yield "data: [DONE]\n\n"
590
728
 
591
729
  # 记录错误到文件
592
730
  timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
593
- log_file = os.path.join(logs_dir, f"stream_error_{conversation_id}_{timestamp}.json")
731
+ log_file = os.path.join(
732
+ logs_dir, f"stream_error_{conversation_id}_{timestamp}.json"
733
+ )
594
734
 
595
735
  log_data = {
596
736
  "conversation_id": conversation_id,
597
737
  "timestamp": timestamp,
598
738
  "model": model_name,
599
739
  "message": message,
600
- "error": error_msg
740
+ "error": error_msg,
601
741
  }
602
742
 
603
743
  with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
@@ -608,40 +748,58 @@ def service_command(args):
608
748
  # Run the server
609
749
  uvicorn.run(app, host=host, port=port)
610
750
 
751
+
611
752
  def main():
612
753
  """Main function"""
613
754
  import argparse
614
755
 
615
756
  init_env("欢迎使用 Jarvis-PlatformManager,您的平台管理助手已准备就绪!")
616
757
 
617
- parser = argparse.ArgumentParser(description='Jarvis AI 平台')
618
- subparsers = parser.add_subparsers(dest='command', help='可用子命令')
758
+ parser = argparse.ArgumentParser(description="Jarvis AI 平台")
759
+ subparsers = parser.add_subparsers(dest="command", help="可用子命令")
619
760
 
620
761
  # info subcommand
621
- info_parser = subparsers.add_parser('info', help='显示支持的平台和模型信息')
762
+ info_parser = subparsers.add_parser("info", help="显示支持的平台和模型信息")
622
763
 
623
764
  # chat subcommand
624
- chat_parser = subparsers.add_parser('chat', help='与指定平台和模型聊天')
625
- chat_parser.add_argument('--platform', '-p', help='指定要使用的平台')
626
- chat_parser.add_argument('--model', '-m', help='指定要使用的模型')
765
+ chat_parser = subparsers.add_parser("chat", help="与指定平台和模型聊天")
766
+ chat_parser.add_argument("--platform", "-p", help="指定要使用的平台")
767
+ chat_parser.add_argument("--model", "-m", help="指定要使用的模型")
627
768
 
628
769
  # service subcommand
629
- service_parser = subparsers.add_parser('service', help='启动OpenAI兼容的API服务')
630
- service_parser.add_argument('--host', default='127.0.0.1', help='服务主机地址 (默认: 127.0.0.1)')
631
- service_parser.add_argument('--port', type=int, default=8000, help='服务端口 (默认: 8000)')
632
- service_parser.add_argument('--platform', '-p', help='指定默认平台,当客户端未指定平台时使用')
633
- service_parser.add_argument('--model', '-m', help='指定默认模型,当客户端未指定模型时使用')
770
+ service_parser = subparsers.add_parser("service", help="启动OpenAI兼容的API服务")
771
+ service_parser.add_argument(
772
+ "--host", default="127.0.0.1", help="服务主机地址 (默认: 127.0.0.1)"
773
+ )
774
+ service_parser.add_argument(
775
+ "--port", type=int, default=8000, help="服务端口 (默认: 8000)"
776
+ )
777
+ service_parser.add_argument(
778
+ "--platform", "-p", help="指定默认平台,当客户端未指定平台时使用"
779
+ )
780
+ service_parser.add_argument(
781
+ "--model", "-m", help="指定默认模型,当客户端未指定模型时使用"
782
+ )
783
+
784
+ # role subcommand
785
+ role_parser = subparsers.add_parser("role", help="加载角色配置文件并开始对话")
786
+ role_parser.add_argument(
787
+ "--config", "-c", default="~/.jarvis/roles.yaml", help="角色配置文件路径(YAML格式,默认: ~/.jarvis/roles.yaml)"
788
+ )
634
789
 
635
790
  args = parser.parse_args()
636
791
 
637
- if args.command == 'info':
792
+ if args.command == "info":
638
793
  info_command(args)
639
- elif args.command == 'chat':
794
+ elif args.command == "chat":
640
795
  chat_command(args)
641
- elif args.command == 'service':
796
+ elif args.command == "service":
642
797
  service_command(args)
798
+ elif args.command == "role":
799
+ role_command(args)
643
800
  else:
644
801
  parser.print_help()
645
802
 
803
+
646
804
  if __name__ == "__main__":
647
- main()
805
+ main()
@@ -98,7 +98,7 @@ def process_request(request: str) -> Optional[str]:
98
98
  输入: "查找Python文件"
99
99
  输出: find . -name "*.py"
100
100
  """
101
- model.set_system_message(system_message)
101
+ model.set_system_prompt(system_message)
102
102
 
103
103
  prefix = f"Current path: {current_path}\n"
104
104
  prefix += f"Current shell: {shell}\n"
@@ -79,7 +79,7 @@ class FileAnalyzerTool:
79
79
  system_message = """你是一个文件分析助手。你的任务是分析提供的文件内容,并根据用户的提示提取关键信息。
80
80
  请保持客观,只关注文件中实际存在的内容。如果无法确定某些信息,请明确指出。
81
81
  请以结构化的方式组织你的回答,使用标题、列表和代码块等格式来提高可读性。"""
82
- platform.set_system_message(system_message)
82
+ platform.set_system_prompt(system_message)
83
83
 
84
84
  # 上传文件
85
85
  with yaspin(Spinners.dots, text="正在上传文件...") as spinner:
@@ -622,11 +622,11 @@ class ToolRegistry(OutputHandlerProtocol):
622
622
  """
623
623
  output_parts = []
624
624
  if stdout:
625
- output_parts.append(f"输出:\n{stdout}")
625
+ output_parts.append(f"<output>\n{stdout}\n</output>")
626
626
  if stderr:
627
- output_parts.append(f"错误:\n{stderr}")
627
+ output_parts.append(f"<error>\n{stderr}\n</error>")
628
628
  output = "\n\n".join(output_parts)
629
- return "无输出和错误" if not output else output
629
+ return "<无输出和错误>" if not output else output
630
630
 
631
631
  def _truncate_output(self, output: str) -> str:
632
632
  """截断过长的输出内容
@@ -21,6 +21,10 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
21
21
  # 全局代理管理
22
22
  global_agents: Set[str] = set()
23
23
  current_agent_name: str = ""
24
+ # 表示与大模型交互的深度(>0表示正在交互)
25
+ g_in_chat: int = 0
26
+ # 表示是否接收到中断信号
27
+ g_interrupt: bool = False
24
28
  # 使用自定义主题配置rich控制台
25
29
  custom_theme = Theme({
26
30
  "INFO": "yellow",
@@ -83,3 +87,44 @@ def delete_agent(agent_name: str) -> None:
83
87
  global_agents.remove(agent_name)
84
88
  global current_agent_name
85
89
  current_agent_name = ""
90
+
91
+ def set_in_chat(status: bool) -> None:
92
+ """
93
+ 设置与大模型交互的状态。
94
+
95
+ 参数:
96
+ status: True表示增加交互深度,False表示减少
97
+ """
98
+ global g_in_chat
99
+ if status:
100
+ g_in_chat += 1
101
+ else:
102
+ g_in_chat = max(0, g_in_chat - 1)
103
+
104
+ def get_in_chat() -> bool:
105
+ """
106
+ 获取当前是否正在与大模型交互的状态。
107
+
108
+ 返回:
109
+ bool: 当前交互状态(>0表示正在交互)
110
+ """
111
+ return g_in_chat > 0
112
+
113
+ def set_interrupt(status: bool) -> None:
114
+ """
115
+ 设置中断信号状态。
116
+
117
+ 参数:
118
+ status: 中断状态
119
+ """
120
+ global g_interrupt
121
+ g_interrupt = status
122
+
123
+ def get_interrupt() -> bool:
124
+ """
125
+ 获取当前中断信号状态。
126
+
127
+ 返回:
128
+ bool: 当前中断状态
129
+ """
130
+ return g_interrupt
@@ -1,6 +1,7 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  import hashlib
3
3
  import os
4
+ import signal
4
5
  import subprocess
5
6
  import tarfile
6
7
  import time
@@ -14,6 +15,7 @@ from jarvis.jarvis_utils.config import get_data_dir, get_max_big_content_size, s
14
15
  from jarvis.jarvis_utils.embedding import get_context_token_count
15
16
  from jarvis.jarvis_utils.input import get_single_line_input
16
17
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
18
+ from jarvis.jarvis_utils.globals import get_in_chat, set_interrupt
17
19
 
18
20
 
19
21
 
@@ -25,11 +27,25 @@ def init_env(welcome_str: str, config_file: Optional[str] = None) -> None:
25
27
  3. 处理文件读取异常
26
28
  4. 检查git仓库状态并在落后时更新
27
29
  5. 统计当前命令使用次数
30
+ 6. 注册SIGINT信号处理函数
28
31
 
29
32
  参数:
30
33
  welcome_str: 欢迎信息字符串
31
34
  config_file: 配置文件路径,默认为None(使用~/.jarvis/config.yaml)
32
35
  """
36
+ # 保存原始信号处理函数
37
+ original_sigint = signal.getsignal(signal.SIGINT)
38
+
39
+ def sigint_handler(signum, frame):
40
+ if get_in_chat():
41
+ PrettyOutput.print("接收到SIGINT信号,正在设置中断标志...", OutputType.INFO)
42
+ set_interrupt(True)
43
+ else:
44
+ PrettyOutput.print("接收到SIGINT信号,正在优雅退出...", OutputType.INFO)
45
+ if original_sigint and callable(original_sigint):
46
+ original_sigint(signum, frame)
47
+
48
+ signal.signal(signal.SIGINT, sigint_handler)
33
49
  count_cmd_usage()
34
50
 
35
51
  jarvis_ascii_art = f"""
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: jarvis-ai-assistant
3
- Version: 0.1.190
3
+ Version: 0.1.191
4
4
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
5
5
  Home-page: https://github.com/skyfireitdiy/Jarvis
6
6
  Author: skyfire
@@ -230,6 +230,54 @@ OPENAI_API_BASE: https://api.openai.com/v1 # 可选,默认为官方API地址
230
230
  | `jarvis-git-details` | - | 使用git details功能 |
231
231
  | `jarvis-methodology` | - | 使用方法论功能 |
232
232
 
233
+ ## 🏗️ 平台管理功能
234
+
235
+ `jarvis-platform-manager` 提供以下子命令来管理AI平台和模型:
236
+
237
+ ### 1. 列出支持的平台和模型
238
+ ```bash
239
+ jarvis-platform-manager info
240
+ ```
241
+ 显示所有支持的AI平台及其可用模型列表。
242
+
243
+ ### 2. 与指定平台和模型聊天
244
+ ```bash
245
+ jarvis-platform-manager chat -p <平台名称> -m <模型名称>
246
+ ```
247
+ 启动交互式聊天会话,支持以下命令:
248
+ - `/bye` - 退出聊天
249
+ - `/clear` - 清除当前会话
250
+ - `/upload <文件路径>` - 上传文件到当前会话
251
+ - `/shell <命令>` - 执行shell命令
252
+ - `/save <文件名>` - 保存最后一条消息
253
+ - `/saveall <文件名>` - 保存完整对话历史
254
+
255
+ ### 3. 启动OpenAI兼容的API服务
256
+ ```bash
257
+ jarvis-platform-manager service --host <IP地址> --port <端口号> -p <平台名称> -m <模型名称>
258
+ ```
259
+ 启动一个兼容OpenAI API的服务,可用于其他应用程序集成。
260
+
261
+ ### 4. 加载角色配置文件
262
+ ```bash
263
+ jarvis-platform-manager role -c <配置文件路径>
264
+ ```
265
+ 从YAML配置文件加载预定义角色进行对话。配置文件格式示例:
266
+ ```yaml
267
+ roles:
268
+ - name: "代码助手"
269
+ description: "专注于代码分析和生成的AI助手"
270
+ platform: "yuanbao"
271
+ model: "deep_seek_v3"
272
+ system_prompt: "你是一个专业的代码助手,专注于分析和生成高质量的代码"
273
+ - name: "文档撰写"
274
+ description: "帮助撰写技术文档的AI助手"
275
+ platform: "kimi"
276
+ model: "k1"
277
+ system_prompt: "你是一个技术文档撰写专家,擅长将复杂技术概念转化为清晰易懂的文字"
278
+ ```
279
+
280
+
233
281
  ---
234
282
 
235
283
  ## ⚙️ 配置说明 <a id="configuration"></a>
@@ -428,7 +476,7 @@ class CustomPlatform(BasePlatform):
428
476
  # 设置模型名称
429
477
  pass
430
478
 
431
- def set_system_message(self, message: str):
479
+ def set_system_prompt(self, message: str):
432
480
  # 设置系统消息
433
481
  pass
434
482
 
@@ -1,8 +1,8 @@
1
- jarvis/__init__.py,sha256=4h6TNgKLF0A6znYlkXoPrVCeP9Pj6DTrNF6NbH9Oroo,74
2
- jarvis/jarvis_agent/__init__.py,sha256=utqI92rkiqiVR2zk5N-IQe2CeMSl-sNiLU429dLoGVw,30487
1
+ jarvis/__init__.py,sha256=cHZhc7P2GKlcTNqDbgIRmnJdpA9XNqfw_meJJM4O-sg,74
2
+ jarvis/jarvis_agent/__init__.py,sha256=NnHJY7yEthpS4w9Yt6cvOeQZGPy-Qx9bnjV-Qo7HE00,29774
3
3
  jarvis/jarvis_agent/builtin_input_handler.py,sha256=f4DaEHPakXcAbgykFP-tiOQP6fh_yGFlZx_h91_j2tQ,1529
4
4
  jarvis/jarvis_agent/file_input_handler.py,sha256=OfoYI5on6w5BDUUg4OadFcfWzMsUF70GNrlt9QyauvA,4181
5
- jarvis/jarvis_agent/jarvis.py,sha256=gOZfTwVlG-GZxPjgCoSiIcFsl4RwwfPA0CGUjE5J7oU,6249
5
+ jarvis/jarvis_agent/jarvis.py,sha256=zfYlwXaZJYfwvNeU5IUSlURyY0pn7QxsHmXBqSptUo8,6105
6
6
  jarvis/jarvis_agent/main.py,sha256=miR8wnWBzmbhOfnscyiKo1oI4wZBRU6FEE-k1lkqtiI,2752
7
7
  jarvis/jarvis_agent/output_handler.py,sha256=7qori-RGrQmdiFepoEe3oPPKJIvRt90l_JDmvCoa4zA,1219
8
8
  jarvis/jarvis_agent/shell_input_handler.py,sha256=pi3AtPKrkKc6K9e99S1djKXQ_XrxtP6FrSWebQmRT6E,1261
@@ -47,17 +47,17 @@ jarvis/jarvis_methodology/main.py,sha256=HhEArlKI5PCpGnBCwVrXMuDn2z84LgpgK7-aGSQ
47
47
  jarvis/jarvis_multi_agent/__init__.py,sha256=Xab5sFltJmX_9MoXqanmZs6FqKfUb2v_pG29Vk8ZXaw,4311
48
48
  jarvis/jarvis_multi_agent/main.py,sha256=KeGv8sdpSgTjW6VE4-tQ8BWDC_a0aE_4R3OqzPBd5N4,1646
49
49
  jarvis/jarvis_platform/__init__.py,sha256=0YnsUoM4JkIBOtImFdjfuDbrqQZT3dEaAwSJ62DrpCc,104
50
- jarvis/jarvis_platform/base.py,sha256=C_50l5kc2P1OP8NH4JbaMZdY-aWOfhfuECoUlRCriU8,7029
51
- jarvis/jarvis_platform/human.py,sha256=xwaTZ1zdrAYZZFXxkbHvUdECwCGsic0kgAFUncUr45g,2567
52
- jarvis/jarvis_platform/kimi.py,sha256=5-LUcvBoL_1Y8HZom9pkNFHO7ghstNCPEobVrVESOi4,12101
53
- jarvis/jarvis_platform/openai.py,sha256=VyX3bR1rGxrJdWOtUBf8PgSL9n06KaNbOewL1urzOnk,4741
54
- jarvis/jarvis_platform/registry.py,sha256=3djxE8AB4gwrdAOvRSL0612Rt_CcsaZhzZ0_oXHu6xk,7820
55
- jarvis/jarvis_platform/tongyi.py,sha256=1cecb2GofJ_7J3xEj_fBj4Ns7XuGIn5CpCi_DFmiP4s,21063
56
- jarvis/jarvis_platform/yuanbao.py,sha256=yOj5T3lo45ULrFkwHXdAsiLhfv141V0sPQplHGeLKNg,20751
50
+ jarvis/jarvis_platform/base.py,sha256=MAY2Xe8WECOfisd-7_F8LXqzsIswkVwlVzXEj-D5Vlg,7186
51
+ jarvis/jarvis_platform/human.py,sha256=MkKdwZ8oY5eacjHOEjUCUwDCJJnXtlzU8o8_jJAMdaA,2566
52
+ jarvis/jarvis_platform/kimi.py,sha256=m45UlTkE3XhZZ3XfQk4degpKWsy5yrdzBHi9pDvmoZk,12100
53
+ jarvis/jarvis_platform/openai.py,sha256=Kj0ZUQ9RRx1fpCN8J8sXcuXnK5bp9AmhzlcZLH9aWsc,4740
54
+ jarvis/jarvis_platform/registry.py,sha256=qq19f9HoISxpVf09t1oEuOgzLXP8QT1mDzWAI5ifIHc,7819
55
+ jarvis/jarvis_platform/tongyi.py,sha256=Q0MCqKofuoQpp6XiYBdgO6LA4vJPEiTvVvKGgwJcpII,21062
56
+ jarvis/jarvis_platform/yuanbao.py,sha256=wpJxYS8lF9c-1F5S6pFSdKhipXkQREtErtn5WgCblCQ,20750
57
57
  jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
58
- jarvis/jarvis_platform_manager/main.py,sha256=7XfiP19Gv88sFkS__v83a-JTI-VUuyH3lWLr4_jTq1w,25863
58
+ jarvis/jarvis_platform_manager/main.py,sha256=klN8c0IItE1wg1V2tPkh2RoEJxXXCgO--Hf7mpmY39I,29558
59
59
  jarvis/jarvis_smart_shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
60
- jarvis/jarvis_smart_shell/main.py,sha256=k59o5UD7merbsPhJQzae95ThTmZY2EcNHB3Ov6kb0PA,5291
60
+ jarvis/jarvis_smart_shell/main.py,sha256=pUoRsAbY2_BNqtVGEOin8UJVb8gmL_-Aj_NITJ9k0eo,5290
61
61
  jarvis/jarvis_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
62
62
  jarvis/jarvis_tools/ask_user.py,sha256=qwxwJIL698rEWdi1txxlPgIr4UFuihfe--NqBEYhIQQ,2168
63
63
  jarvis/jarvis_tools/base.py,sha256=OdlvzUjYQBmZIMcAeBxAqIQo2urh126OerArK-wOPzU,1191
@@ -67,13 +67,13 @@ jarvis/jarvis_tools/create_code_agent.py,sha256=-nHfo5O5pDIG5IX3w1ClQafGvGcdI2_w
67
67
  jarvis/jarvis_tools/create_sub_agent.py,sha256=lyFrrg4V0yXULmU3vldwGp_euZjwZzJcRU6mJ20zejY,3023
68
68
  jarvis/jarvis_tools/edit_file.py,sha256=s8HqG8qHDrYjCwIioeBpGvw7Aw-iEEZoUyRJFqdjcQA,18453
69
69
  jarvis/jarvis_tools/execute_script.py,sha256=IA1SkcnwBB9PKG2voBNx5N9GXL303OC7OOtdqRfqWOk,6428
70
- jarvis/jarvis_tools/file_analyzer.py,sha256=UuQmti-eBocJB6ivMINmOvSuXxBxOqmbQ3RsQlyueWs,4918
70
+ jarvis/jarvis_tools/file_analyzer.py,sha256=jl9phaN6BqMcgrikMeaxY-9VYXbXQOO1Zu61fZocGv0,4917
71
71
  jarvis/jarvis_tools/file_operation.py,sha256=WloC1-oPJLwgICu4WBc9f7XA8N_Ggl73QQ5CxM2XTlE,9464
72
72
  jarvis/jarvis_tools/generate_new_tool.py,sha256=KZX4wpSpBZ4S5817zAN5j7AAirtgBCrNUmjrpfL9dNI,7706
73
73
  jarvis/jarvis_tools/methodology.py,sha256=m7cQmVhhQpUUl_uYTVvcW0JBovQLx5pWTXh_8K77HsU,5237
74
74
  jarvis/jarvis_tools/read_code.py,sha256=pL2SwZDsJbJMXo4stW96quFsLgbtPVIAW-h4sDKsLtM,6274
75
75
  jarvis/jarvis_tools/read_webpage.py,sha256=PFAYuKjay9j6phWzyuZ99ZfNaHJljmRWAgS0bsvbcvE,2219
76
- jarvis/jarvis_tools/registry.py,sha256=339NBh4qZHsiBKMHgKV2kgDVhEkeZqiSJnZTedGMK8o,25162
76
+ jarvis/jarvis_tools/registry.py,sha256=6iroEOm75KDhgSM0HHb1p8Sn2lLemZ29nSw7O6IzRPY,25186
77
77
  jarvis/jarvis_tools/rewrite_file.py,sha256=3V2l7kG5DG9iRimBce-1qCRuJPL0QM32SBTzOl2zCqM,7004
78
78
  jarvis/jarvis_tools/search_web.py,sha256=rzxrCOTEo-MmLQrKI4k-AbfidUfJUeCPK4f5ZJy48G8,952
79
79
  jarvis/jarvis_tools/virtual_tty.py,sha256=8E_n-eC-RRPTqYx6BI5Q2RnorY8dbhKFBfAjIiRQROA,16397
@@ -85,15 +85,15 @@ jarvis/jarvis_utils/config.py,sha256=Z7pZsSYXJkc2RzUhJ-_VvQA3xOLo6LEo4nEE1ftyQY8
85
85
  jarvis/jarvis_utils/embedding.py,sha256=J8YAqIEj16TJIPEG24uvUlPHeN-5zq0JW_hbNLizQug,3832
86
86
  jarvis/jarvis_utils/file_processors.py,sha256=G5kQI7vCGIDnjgAB5J1dYIR102u6WUv3IhcWFfDh_gs,2977
87
87
  jarvis/jarvis_utils/git_utils.py,sha256=k0rrMAbKwnD7hztmtegxtFFiCzyID4p2oHKTycE2Q-4,15070
88
- jarvis/jarvis_utils/globals.py,sha256=6JWtB1XoD-wEFiMzZNA790ixlZ_OsJEYUM_B8EwkOE8,2277
88
+ jarvis/jarvis_utils/globals.py,sha256=13mShJwkBQxyilUZVIThN11j17QQDGHRjS3UMRMewac,3231
89
89
  jarvis/jarvis_utils/input.py,sha256=FkLW7MXL8awQUghFLQnW1r5F1wV8K3EZeVPwHFRHJTo,7458
90
90
  jarvis/jarvis_utils/methodology.py,sha256=6vf__ahwJZ2I62mWGAvh2C-G6pq930Dh_EkrY1VpduQ,8485
91
91
  jarvis/jarvis_utils/output.py,sha256=QboL42GtG_dnvd1O64sl8o72mEBhXNRADPXQMXgDE7Q,9661
92
92
  jarvis/jarvis_utils/tag.py,sha256=YJHmuedLb7_AiqvKQetHr4R1FxyzIh7HN0RRkWMmYbU,429
93
- jarvis/jarvis_utils/utils.py,sha256=dTFIN6EV48BuC4VOyvcVcj4P0tsWysc9ennbMRhLJjk,10960
94
- jarvis_ai_assistant-0.1.190.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
95
- jarvis_ai_assistant-0.1.190.dist-info/METADATA,sha256=3Dz7yIwWszneilEi9OfdiqzumWW0ruMLLjbzqZKC7RA,16257
96
- jarvis_ai_assistant-0.1.190.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
97
- jarvis_ai_assistant-0.1.190.dist-info/entry_points.txt,sha256=Gy3DOP1PYLMK0GCj4rrP_9lkOyBQ39EK_lKGUSwn41E,869
98
- jarvis_ai_assistant-0.1.190.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
99
- jarvis_ai_assistant-0.1.190.dist-info/RECORD,,
93
+ jarvis/jarvis_utils/utils.py,sha256=j9L5kPT3jOYP0PxiAywJtbfbMM4Fu7MyKgjfPKKACkE,11649
94
+ jarvis_ai_assistant-0.1.191.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
95
+ jarvis_ai_assistant-0.1.191.dist-info/METADATA,sha256=GL95Op-eQIglzKxOYSL4AyfFsTifEbtxoRoPBkbtHNc,17829
96
+ jarvis_ai_assistant-0.1.191.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
97
+ jarvis_ai_assistant-0.1.191.dist-info/entry_points.txt,sha256=Gy3DOP1PYLMK0GCj4rrP_9lkOyBQ39EK_lKGUSwn41E,869
98
+ jarvis_ai_assistant-0.1.191.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
99
+ jarvis_ai_assistant-0.1.191.dist-info/RECORD,,