jarvis-ai-assistant 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. jarvis/__init__.py +1 -1
  2. jarvis/jarvis_agent/jarvis.py +61 -59
  3. jarvis/jarvis_agent/main.py +42 -40
  4. jarvis/jarvis_agent/prompts.py +26 -4
  5. jarvis/jarvis_code_agent/code_agent.py +35 -31
  6. jarvis/jarvis_code_analysis/code_review.py +73 -39
  7. jarvis/jarvis_data/config_schema.json +67 -12
  8. jarvis/jarvis_git_squash/main.py +16 -12
  9. jarvis/jarvis_git_utils/git_commiter.py +25 -20
  10. jarvis/jarvis_methodology/main.py +34 -49
  11. jarvis/jarvis_multi_agent/main.py +28 -23
  12. jarvis/jarvis_platform/ai8.py +31 -22
  13. jarvis/jarvis_platform/kimi.py +31 -61
  14. jarvis/jarvis_platform/tongyi.py +71 -85
  15. jarvis/jarvis_platform/yuanbao.py +44 -50
  16. jarvis/jarvis_platform_manager/main.py +55 -90
  17. jarvis/jarvis_rag/cli.py +79 -23
  18. jarvis/jarvis_rag/query_rewriter.py +61 -12
  19. jarvis/jarvis_rag/rag_pipeline.py +143 -34
  20. jarvis/jarvis_rag/retriever.py +5 -5
  21. jarvis/jarvis_smart_shell/main.py +58 -87
  22. jarvis/jarvis_tools/cli/main.py +120 -153
  23. jarvis/jarvis_tools/generate_new_tool.py +22 -1
  24. jarvis/jarvis_tools/registry.py +1 -7
  25. jarvis/jarvis_tools/search_web.py +12 -10
  26. jarvis/jarvis_utils/config.py +92 -11
  27. jarvis/jarvis_utils/globals.py +29 -8
  28. jarvis/jarvis_utils/http.py +58 -79
  29. jarvis/jarvis_utils/input.py +114 -121
  30. jarvis/jarvis_utils/output.py +1 -1
  31. jarvis/jarvis_utils/utils.py +3 -0
  32. jarvis_ai_assistant-0.2.3.dist-info/METADATA +301 -0
  33. {jarvis_ai_assistant-0.2.1.dist-info → jarvis_ai_assistant-0.2.3.dist-info}/RECORD +37 -40
  34. {jarvis_ai_assistant-0.2.1.dist-info → jarvis_ai_assistant-0.2.3.dist-info}/entry_points.txt +0 -2
  35. jarvis/jarvis_git_details/__init__.py +0 -0
  36. jarvis/jarvis_git_details/main.py +0 -265
  37. jarvis/jarvis_platform/oyi.py +0 -357
  38. jarvis_ai_assistant-0.2.1.dist-info/METADATA +0 -845
  39. {jarvis_ai_assistant-0.2.1.dist-info → jarvis_ai_assistant-0.2.3.dist-info}/WHEEL +0 -0
  40. {jarvis_ai_assistant-0.2.1.dist-info → jarvis_ai_assistant-0.2.3.dist-info}/licenses/LICENSE +0 -0
  41. {jarvis_ai_assistant-0.2.1.dist-info → jarvis_ai_assistant-0.2.3.dist-info}/top_level.txt +0 -0
@@ -156,39 +156,24 @@ class KimiModel(BasePlatform):
156
156
  sleep_time=5,
157
157
  )
158
158
 
159
- response_data = b""
160
-
161
159
  # 处理流式响应
162
- for chunk in response_stream:
163
- response_data += chunk
164
-
165
- # 尝试解析SSE格式的数据
166
- try:
167
- # 查找完整的数据行
168
- lines = response_data.decode("utf-8").split("\n")
169
- response_data = b"" # 重置缓冲区
170
-
171
- for line in lines:
172
- if not line.strip():
173
- continue
174
-
175
- # SSE格式的行通常以"data: "开头
176
- if line.startswith("data: "):
177
- try:
178
- data = json.loads(line[6:])
179
- if data.get("event") == "resp":
180
- status = data.get("file_info", {}).get("status")
181
- if status == "parsed":
182
- return True
183
- elif status == "failed":
184
- return False
185
- except json.JSONDecodeError:
186
- continue
187
-
188
- except UnicodeDecodeError:
189
- # 如果解码失败,继续累积数据
160
+ for line in response_stream:
161
+ if not line.strip():
190
162
  continue
191
163
 
164
+ # SSE格式的行通常以"data: "开头
165
+ if line.startswith("data: "):
166
+ try:
167
+ data = json.loads(line[6:])
168
+ if data.get("event") == "resp":
169
+ status = data.get("file_info", {}).get("status")
170
+ if status == "parsed":
171
+ return True
172
+ elif status == "failed":
173
+ return False
174
+ except json.JSONDecodeError:
175
+ continue
176
+
192
177
  retry_count += 1
193
178
  time.sleep(1)
194
179
 
@@ -299,40 +284,25 @@ class KimiModel(BasePlatform):
299
284
  sleep_time=5,
300
285
  )
301
286
 
302
- response_data = b""
303
-
304
287
  # 处理流式响应
305
- for chunk in response_stream:
306
- response_data += chunk
307
-
308
- # 尝试解析SSE格式的数据
309
- try:
310
- # 查找完整的数据行
311
- lines = response_data.decode("utf-8").split("\n")
312
- response_data = b"" # 重置缓冲区
313
-
314
- for line in lines:
315
- if not line.strip():
316
- continue
317
-
318
- # SSE格式的行通常以"data: "开头
319
- if line.startswith("data: "):
320
- try:
321
- data = json.loads(line[6:])
322
- event = data.get("event")
323
-
324
- if event == "cmpl":
325
- # 处理补全文本
326
- text = data.get("text", "")
327
- if text:
328
- yield text
329
- except json.JSONDecodeError:
330
- continue
331
-
332
- except UnicodeDecodeError:
333
- # 如果解码失败,继续累积数据
288
+ for line in response_stream:
289
+ if not line.strip():
334
290
  continue
335
291
 
292
+ # SSE格式的行通常以"data: "开头
293
+ if line.startswith("data: "):
294
+ try:
295
+ data = json.loads(line[6:])
296
+ event = data.get("event")
297
+
298
+ if event == "cmpl":
299
+ # 处理补全文本
300
+ text = data.get("text", "")
301
+ if text:
302
+ yield text
303
+ except json.JSONDecodeError:
304
+ continue
305
+
336
306
  return None
337
307
 
338
308
  except Exception as e:
@@ -8,9 +8,9 @@ from typing import Any, Dict, Generator, List, Tuple
8
8
  from jarvis.jarvis_platform.base import BasePlatform
9
9
  from jarvis.jarvis_utils import http
10
10
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
11
+ from jarvis.jarvis_utils.tag import ot, ct
11
12
  from jarvis.jarvis_utils.utils import while_success
12
13
 
13
-
14
14
  class TongyiPlatform(BasePlatform):
15
15
  """Tongyi platform implementation"""
16
16
 
@@ -81,10 +81,10 @@ class TongyiPlatform(BasePlatform):
81
81
  "contentType": "text",
82
82
  "role": "user",
83
83
  "ext": {
84
- "searchType": "",
84
+ "searchType": "depth" if self.web else "",
85
85
  "pptGenerate": False,
86
- "deepThink": False,
87
- "deepResearch": False,
86
+ "deepThink": self.model_name == "Thinking",
87
+ "deepResearch": self.model_name == "Deep-Research",
88
88
  },
89
89
  }
90
90
  ]
@@ -98,10 +98,10 @@ class TongyiPlatform(BasePlatform):
98
98
  "contentType": "text",
99
99
  "role": "system",
100
100
  "ext": {
101
- "searchType": "",
101
+ "searchType": "depth" if self.web else "",
102
102
  "pptGenerate": False,
103
- "deepThink": False,
104
- "deepResearch": False,
103
+ "deepThink": self.model_name == "Thinking",
104
+ "deepResearch": self.model_name == "Deep-Research",
105
105
  },
106
106
  },
107
107
  )
@@ -140,13 +140,13 @@ class TongyiPlatform(BasePlatform):
140
140
  "parentMsgId": self.msg_id,
141
141
  "params": {
142
142
  "agentId": "",
143
- "searchType": "",
143
+ "searchType": "depth" if self.web else "",
144
144
  "pptGenerate": False,
145
145
  "bizScene": "code_chat" if self.model_name == "Code-Chat" else "",
146
146
  "bizSceneInfo": {},
147
147
  "specifiedModel": "",
148
- "deepThink": True if self.model_name == "Thinking" else False,
149
- "deepResearch": False,
148
+ "deepThink": self.model_name == "Thinking",
149
+ "deepResearch": self.model_name == "Deep-Research",
150
150
  "fileUploadBatchId": (
151
151
  self.uploaded_file_info[0]["batchId"]
152
152
  if self.uploaded_file_info
@@ -168,88 +168,74 @@ class TongyiPlatform(BasePlatform):
168
168
  thinking_content = ""
169
169
  text_content = ""
170
170
  in_thinking = False
171
- response_data = b""
172
171
 
173
172
  # 处理流式响应
174
- for chunk in response_stream:
175
- response_data += chunk
176
-
177
- # 尝试解析SSE格式的数据
178
- try:
179
- # 查找完整的数据行
180
- lines = response_data.decode("utf-8").split("\n")
181
- response_data = b"" # 重置缓冲区
182
-
183
- for line in lines:
184
- if not line.strip():
185
- continue
186
-
187
- # SSE格式的行通常以"data: "开头
188
- if line.startswith("data: "):
189
- try:
190
- data = json.loads(line[6:])
191
- # 记录消息ID和会话ID
192
- if "msgId" in data:
193
- msg_id = data["msgId"]
194
- if "sessionId" in data:
195
- session_id = data["sessionId"]
196
-
197
- if "contents" in data and len(data["contents"]) > 0:
198
- for content in data["contents"]:
199
- if content.get("contentType") == "think":
200
- if not in_thinking:
201
- yield "<think>\n\n"
202
- in_thinking = True
203
- if content.get("incremental"):
204
- tmp_content = json.loads(
205
- content.get("content")
206
- )["content"]
207
- thinking_content += tmp_content
208
- yield tmp_content
209
- else:
210
- tmp_content = json.loads(
211
- content.get("content")
212
- )["content"]
213
- if len(thinking_content) < len(
214
- tmp_content
215
- ):
216
- yield tmp_content[
217
- len(thinking_content) :
218
- ]
219
- thinking_content = tmp_content
220
- else:
221
- yield "\r\n</think>\n"[
222
- len(thinking_content)
223
- - len(tmp_content) :
224
- ]
225
- thinking_content = tmp_content
226
- in_thinking = False
227
- elif content.get("contentType") == "text":
228
- if in_thinking:
229
- continue
230
- if content.get("incremental"):
231
- tmp_content = content.get("content")
232
- text_content += tmp_content
233
- yield tmp_content
234
- else:
235
- tmp_content = content.get("content")
236
- if len(text_content) < len(tmp_content):
237
- yield tmp_content[
238
- len(text_content) :
239
- ]
240
- text_content = tmp_content
241
-
242
- except json.JSONDecodeError:
243
- continue
244
-
245
- except UnicodeDecodeError:
246
- # 如果解码失败,继续累积数据
173
+ for line in response_stream:
174
+ if not line.strip():
247
175
  continue
248
176
 
177
+ # SSE格式的行通常以"data: "开头
178
+ if line.startswith("data: "):
179
+ try:
180
+ data = json.loads(line[6:])
181
+ # 记录消息ID和会话ID
182
+ if "msgId" in data:
183
+ msg_id = data["msgId"]
184
+ if "sessionId" in data:
185
+ session_id = data["sessionId"]
186
+
187
+ if "contents" in data and len(data["contents"]) > 0:
188
+ for content in data["contents"]:
189
+ if content.get("contentType") == "think":
190
+ if not in_thinking:
191
+ yield f"{ot('think')}\n\n"
192
+ in_thinking = True
193
+ if content.get("incremental"):
194
+ tmp_content = json.loads(
195
+ content.get("content")
196
+ )["content"]
197
+ thinking_content += tmp_content
198
+ yield tmp_content
199
+ else:
200
+ tmp_content = json.loads(
201
+ content.get("content")
202
+ )["content"]
203
+ if len(thinking_content) < len(
204
+ tmp_content
205
+ ):
206
+ yield tmp_content[
207
+ len(thinking_content) :
208
+ ]
209
+ thinking_content = tmp_content
210
+ else:
211
+ yield f"\r\n{ct('think')}\n"[
212
+ len(thinking_content)
213
+ - len(tmp_content) :
214
+ ]
215
+ thinking_content = tmp_content
216
+ in_thinking = False
217
+ elif content.get("contentType") == "text":
218
+ if in_thinking:
219
+ continue
220
+ if content.get("incremental"):
221
+ tmp_content = content.get("content")
222
+ text_content += tmp_content
223
+ yield tmp_content
224
+ else:
225
+ tmp_content = content.get("content")
226
+ if len(text_content) < len(tmp_content):
227
+ yield tmp_content[
228
+ len(text_content) :
229
+ ]
230
+ text_content = tmp_content
231
+
232
+ except json.JSONDecodeError:
233
+ continue
234
+
249
235
  self.msg_id = msg_id
250
236
  self.session_id = session_id
251
237
 
252
- return None
238
+ return
253
239
 
254
240
  except Exception as e:
255
241
  raise Exception(f"Chat failed: {str(e)}")
@@ -12,6 +12,7 @@ from PIL import Image # type: ignore
12
12
  from jarvis.jarvis_platform.base import BasePlatform
13
13
  from jarvis.jarvis_utils import http
14
14
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
15
+ from jarvis.jarvis_utils.tag import ot, ct
15
16
  from jarvis.jarvis_utils.utils import while_success
16
17
 
17
18
 
@@ -474,61 +475,54 @@ class YuanbaoPlatform(BasePlatform):
474
475
  )
475
476
 
476
477
  in_thinking = False
477
- response_data = b""
478
478
 
479
479
  # 处理流式响应
480
- for chunk in response_stream:
481
- response_data += chunk
482
-
483
- # 尝试解析SSE格式的数据
484
- try:
485
- # 查找完整的数据行
486
- lines = response_data.decode("utf-8").split("\n")
487
- response_data = b"" # 重置缓冲区
488
-
489
- for line in lines:
490
- if not line.strip():
491
- continue
492
-
493
- # SSE格式的行通常以"data: "开头
494
- if line.startswith("data: "):
495
- try:
496
- data_str = line[6:] # 移除"data: "前缀
497
-
498
- # 检查结束标志
499
- if data_str == "[DONE]":
500
- self.first_chat = False
501
- return None
502
-
503
- data = json.loads(data_str)
504
-
505
- # 处理文本类型的消息
506
- if data.get("type") == "text":
507
- if in_thinking:
508
- yield "</think>\n"
509
- in_thinking = False
510
- msg = data.get("msg", "")
511
- if msg:
512
- yield msg
513
-
514
- # 处理思考中的消息
515
- elif data.get("type") == "think":
516
- if not in_thinking:
517
- yield "<think>\n"
518
- in_thinking = True
519
- think_content = data.get("content", "")
520
- if think_content:
521
- yield think_content
522
-
523
- except json.JSONDecodeError:
524
- pass
525
-
526
- except UnicodeDecodeError:
527
- # 如果解码失败,继续累积数据
480
+ for line in response_stream:
481
+ if not line.strip():
528
482
  continue
529
483
 
484
+ # SSE格式的行通常以"data: "开头
485
+ if line.startswith("data: "):
486
+ try:
487
+ data_str = line[6:] # 移除"data: "前缀
488
+
489
+ # 检查结束标志
490
+ if data_str == "[DONE]":
491
+ self.first_chat = False
492
+ return
493
+
494
+ data = json.loads(data_str)
495
+
496
+ # 处理文本类型的消息
497
+ if data.get("type") == "text":
498
+ if in_thinking:
499
+ yield f"{ct('think')}\n"
500
+ in_thinking = False
501
+ msg = data.get("msg", "")
502
+ if msg:
503
+ yield msg
504
+
505
+ # 处理思考中的消息
506
+ elif data.get("type") == "think":
507
+ if not in_thinking:
508
+ yield f"{ot('think')}\n"
509
+ in_thinking = True
510
+ think_content = data.get("content", "")
511
+ if think_content:
512
+ yield think_content
513
+
514
+ except json.JSONDecodeError:
515
+ pass
516
+ else:
517
+ try:
518
+ data = json.loads(line)
519
+ if "msg" in data:
520
+ yield data["msg"]
521
+ except json.JSONDecodeError:
522
+ pass
523
+
530
524
  self.first_chat = False
531
- return None
525
+ return
532
526
 
533
527
  except Exception as e:
534
528
  raise Exception(f"对话失败: {str(e)}")
@@ -3,17 +3,22 @@
3
3
 
4
4
  该模块提供了Jarvis平台管理器的主要入口点。
5
5
  """
6
- import argparse
7
6
  import os
7
+ import sys
8
8
  from typing import Any, Dict, List, Optional
9
9
 
10
+ import typer
11
+
10
12
  from jarvis.jarvis_platform.registry import PlatformRegistry
11
13
  from jarvis.jarvis_utils.input import get_multiline_input, get_single_line_input
12
14
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
13
15
  from jarvis.jarvis_utils.utils import init_env
14
16
  from jarvis.jarvis_platform_manager.service import start_service
15
17
 
18
+ app = typer.Typer(help="Jarvis AI 平台")
19
+
16
20
 
21
+ @app.command("info")
17
22
  def list_platforms() -> None:
18
23
  """列出所有支持的平台和模型。"""
19
24
  registry = PlatformRegistry.get_global_platform_registry()
@@ -291,8 +296,11 @@ def chat_with_model(platform_name: str, model_name: str, system_prompt: str) ->
291
296
  except Exception as exc:
292
297
  PrettyOutput.print(f"聊天失败: {str(exc)}", OutputType.ERROR)
293
298
 
299
+ except typer.Exit:
300
+ raise
294
301
  except Exception as exc:
295
302
  PrettyOutput.print(f"初始化会话失败: {str(exc)}", OutputType.ERROR)
303
+ sys.exit(1)
296
304
  finally:
297
305
  # Clean up resources
298
306
  try:
@@ -301,16 +309,17 @@ def chat_with_model(platform_name: str, model_name: str, system_prompt: str) ->
301
309
  pass
302
310
 
303
311
 
304
- def validate_platform_model(args: argparse.Namespace) -> bool:
312
+ def validate_platform_model(platform: Optional[str], model: Optional[str]) -> bool:
305
313
  """验证平台和模型参数。
306
314
 
307
315
  参数:
308
- args: 命令行参数。
316
+ platform: 平台名称。
317
+ model: 模型名称。
309
318
 
310
319
  返回:
311
320
  bool: 如果平台和模型有效返回True,否则返回False。
312
321
  """
313
- if not args.platform or not args.model:
322
+ if not platform or not model:
314
323
  PrettyOutput.print(
315
324
  "请指定平台和模型。使用 'jarvis info' 查看可用平台和模型。",
316
325
  OutputType.WARNING,
@@ -319,38 +328,30 @@ def validate_platform_model(args: argparse.Namespace) -> bool:
319
328
  return True
320
329
 
321
330
 
322
- def chat_command(args: argparse.Namespace) -> None:
323
- """处理聊天子命令。
324
-
325
- 参数:
326
- args: 命令行参数。
327
- """
328
- if not validate_platform_model(args):
331
+ @app.command("chat")
332
+ def chat_command(
333
+ platform: Optional[str] = typer.Option(None, "--platform", "-p", help="指定要使用的平台"),
334
+ model: Optional[str] = typer.Option(None, "--model", "-m", help="指定要使用的模型"),
335
+ ) -> None:
336
+ """与指定平台和模型聊天。"""
337
+ if not validate_platform_model(platform, model):
329
338
  return
330
- chat_with_model(args.platform, args.model, "")
339
+ chat_with_model(platform, model, "") # type: ignore
331
340
 
332
341
 
333
- def info_command(args: argparse.Namespace) -> None:
334
- """处理信息子命令。
335
-
336
- 参数:
337
- args: 命令行参数。
338
- """
339
- list_platforms()
340
-
341
-
342
- def service_command(args: argparse.Namespace) -> None:
343
- """处理服务子命令 - 启动OpenAI兼容的API服务。
344
-
345
- 参数:
346
- args: 命令行参数。
347
- """
348
- start_service(
349
- host=args.host,
350
- port=args.port,
351
- default_platform=args.platform,
352
- default_model=args.model,
353
- )
342
+ @app.command("service")
343
+ def service_command(
344
+ host: str = typer.Option("127.0.0.1", help="服务主机地址 (默认: 127.0.0.1)"),
345
+ port: int = typer.Option(8000, help="服务端口 (默认: 8000)"),
346
+ platform: Optional[str] = typer.Option(
347
+ None, "-p", "--platform", help="指定默认平台,当客户端未指定平台时使用"
348
+ ),
349
+ model: Optional[str] = typer.Option(
350
+ None, "-m", "--model", help="指定默认模型,当客户端未指定平台时使用"
351
+ ),
352
+ ) -> None:
353
+ """启动OpenAI兼容的API服务。"""
354
+ start_service(host=host, port=port, default_platform=platform, default_model=model)
354
355
 
355
356
 
356
357
  def load_role_config(config_path: str) -> Dict[str, Any]:
@@ -377,13 +378,24 @@ def load_role_config(config_path: str) -> Dict[str, Any]:
377
378
  return {}
378
379
 
379
380
 
380
- def role_command(args: argparse.Namespace) -> None:
381
- """Process role subcommand - load role config and start chat.
382
-
383
- Args:
384
- args: Command line arguments.
385
- """
386
- config = load_role_config(args.config)
381
+ @app.command("role")
382
+ def role_command(
383
+ config_file: str = typer.Option(
384
+ "~/.jarvis/roles.yaml",
385
+ "--config",
386
+ "-c",
387
+ help="角色配置文件路径(YAML格式,默认: ~/.jarvis/roles.yaml)",
388
+ ),
389
+ platform: Optional[str] = typer.Option(
390
+ None, "--platform", "-p", help="指定要使用的平台,覆盖角色配置"
391
+ ),
392
+ model: Optional[str] = typer.Option(
393
+ None, "--model", "-m", help="指定要使用的模型,覆盖角色配置"
394
+ ),
395
+ ) -> None:
396
+ """加载角色配置文件并开始对话。"""
397
+ config_path = os.path.expanduser(config_file)
398
+ config = load_role_config(config_path)
387
399
  if not config or "roles" not in config:
388
400
  PrettyOutput.print("无效的角色配置文件", OutputType.ERROR)
389
401
  return
@@ -407,8 +419,8 @@ def role_command(args: argparse.Namespace) -> None:
407
419
  return
408
420
 
409
421
  # 初始化平台和模型
410
- platform_name = args.platform or selected_role["platform"]
411
- model_name = args.model or selected_role["model"]
422
+ platform_name = platform or selected_role["platform"]
423
+ model_name = model or selected_role["model"]
412
424
  system_prompt = selected_role.get("system_prompt", "")
413
425
 
414
426
  # 开始对话
@@ -419,54 +431,7 @@ def role_command(args: argparse.Namespace) -> None:
419
431
  def main() -> None:
420
432
  """Jarvis平台管理器的主入口点。"""
421
433
  init_env("欢迎使用 Jarvis-PlatformManager,您的平台管理助手已准备就绪!")
422
-
423
- parser = argparse.ArgumentParser(description="Jarvis AI 平台")
424
- subparsers = parser.add_subparsers(dest="command", help="可用子命令")
425
-
426
- # info subcommand
427
- info_parser = subparsers.add_parser("info", help="显示支持的平台和模型信息")
428
- info_parser.set_defaults(func=info_command)
429
-
430
- # chat subcommand
431
- chat_parser = subparsers.add_parser("chat", help="与指定平台和模型聊天")
432
- chat_parser.add_argument("--platform", "-p", help="指定要使用的平台")
433
- chat_parser.add_argument("--model", "-m", help="指定要使用的模型")
434
- chat_parser.set_defaults(func=chat_command)
435
-
436
- # service subcommand
437
- service_parser = subparsers.add_parser("service", help="启动OpenAI兼容的API服务")
438
- service_parser.add_argument(
439
- "--host", default="127.0.0.1", help="服务主机地址 (默认: 127.0.0.1)"
440
- )
441
- service_parser.add_argument(
442
- "--port", type=int, default=8000, help="服务端口 (默认: 8000)"
443
- )
444
- service_parser.add_argument(
445
- "--platform", "-p", help="指定默认平台,当客户端未指定平台时使用"
446
- )
447
- service_parser.add_argument(
448
- "--model", "-m", help="指定默认模型,当客户端未指定平台时使用"
449
- )
450
- service_parser.set_defaults(func=service_command)
451
-
452
- # role subcommand
453
- role_parser = subparsers.add_parser("role", help="加载角色配置文件并开始对话")
454
- role_parser.add_argument(
455
- "--config",
456
- "-c",
457
- default="~/.jarvis/roles.yaml",
458
- help="角色配置文件路径(YAML格式,默认: ~/.jarvis/roles.yaml)",
459
- )
460
- role_parser.add_argument("--platform", "-p", help="指定要使用的平台,覆盖角色配置")
461
- role_parser.add_argument("--model", "-m", help="指定要使用的模型,覆盖角色配置")
462
- role_parser.set_defaults(func=role_command)
463
-
464
- args = parser.parse_args()
465
-
466
- if hasattr(args, "func"):
467
- args.func(args)
468
- else:
469
- parser.print_help()
434
+ app()
470
435
 
471
436
 
472
437
  if __name__ == "__main__":