jarvis-ai-assistant 0.1.189__py3-none-any.whl → 0.1.191__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +11 -39
- jarvis/jarvis_agent/jarvis.py +0 -2
- jarvis/jarvis_git_utils/git_commiter.py +1 -0
- jarvis/jarvis_platform/base.py +11 -6
- jarvis/jarvis_platform/human.py +1 -1
- jarvis/jarvis_platform/kimi.py +1 -1
- jarvis/jarvis_platform/openai.py +1 -1
- jarvis/jarvis_platform/registry.py +1 -1
- jarvis/jarvis_platform/tongyi.py +1 -1
- jarvis/jarvis_platform/yuanbao.py +1 -1
- jarvis/jarvis_platform_manager/main.py +283 -125
- jarvis/jarvis_smart_shell/main.py +1 -1
- jarvis/jarvis_tools/file_analyzer.py +1 -1
- jarvis/jarvis_tools/generate_new_tool.py +88 -40
- jarvis/jarvis_tools/registry.py +5 -3
- jarvis/jarvis_utils/globals.py +45 -0
- jarvis/jarvis_utils/utils.py +16 -0
- {jarvis_ai_assistant-0.1.189.dist-info → jarvis_ai_assistant-0.1.191.dist-info}/METADATA +62 -3
- {jarvis_ai_assistant-0.1.189.dist-info → jarvis_ai_assistant-0.1.191.dist-info}/RECORD +24 -24
- {jarvis_ai_assistant-0.1.189.dist-info → jarvis_ai_assistant-0.1.191.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.189.dist-info → jarvis_ai_assistant-0.1.191.dist-info}/entry_points.txt +0 -0
- {jarvis_ai_assistant-0.1.189.dist-info → jarvis_ai_assistant-0.1.191.dist-info}/licenses/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.189.dist-info → jarvis_ai_assistant-0.1.191.dist-info}/top_level.txt +0 -0
jarvis/jarvis_platform_manager/main.py

@@ -48,7 +48,10 @@ def list_platforms():
             PrettyOutput.print(" • 没有可用的模型信息", OutputType.WARNING)
 
     except Exception as e:
-        PrettyOutput.print(
+        PrettyOutput.print(
+            f"获取 {platform_name} 的模型列表失败: {str(e)}", OutputType.WARNING
+        )
+
 
 def chat_with_model(platform_name: str, model_name: str):
     """Chat with specified platform and model"""
@@ -65,14 +68,21 @@ def chat_with_model(platform_name: str, model_name: str):
     # Set model
     platform.set_model_name(model_name)
     platform.set_suppress_output(False)
-    PrettyOutput.print(
-
+    PrettyOutput.print(
+        f"连接到 {platform_name} 平台 {model_name} 模型", OutputType.SUCCESS
+    )
+    PrettyOutput.print(
+        "可用命令: /bye - 退出聊天, /clear - 清除会话, /upload - 上传文件, /shell - 执行shell命令, /save - 保存当前对话, /saveall - 保存所有对话",
+        OutputType.INFO,
+    )
 
     # Start conversation loop
     while True:
         # Get user input
         user_input = get_multiline_input("")
-        conversation_history.append(
+        conversation_history.append(
+            {"role": "user", "content": user_input}
+        )  # 记录用户输入
 
         # Check if input is cancelled
         if user_input.strip() == "/bye":
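
The help banner added here lists the chat loop's built-in commands, each of which is handled further down in this diff. A session might use them like so (paths and filenames are illustrative, taken from the usage hints in the warning messages below):

/upload /path/to/file            # upload a file, if the platform supports it
/shell ls -l                     # run a shell command without leaving the chat
/save last_message.txt           # save the last reply to a file
/saveall all_conversations.txt   # save the full conversation history
/clear                           # clear the current session
/bye                             # exit the chat
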
@@ -99,17 +109,22 @@ def chat_with_model(platform_name: str, model_name: str):
             try:
                 file_path = user_input.strip()[8:].strip()
                 if not file_path:
-                    PrettyOutput.print(
+                    PrettyOutput.print(
+                        '请指定要上传的文件路径,例如: /upload /path/to/file 或 /upload "/path/with spaces/file"',
+                        OutputType.WARNING,
+                    )
                     continue
-
+
                 # Remove quotes if present
-                if (file_path.startswith('"') and file_path.endswith('"')) or (
+                if (file_path.startswith('"') and file_path.endswith('"')) or (
+                    file_path.startswith("'") and file_path.endswith("'")
+                ):
                     file_path = file_path[1:-1]
 
                 if not platform.support_upload_files():
                     PrettyOutput.print("平台不支持上传文件", OutputType.ERROR)
                     continue
-
+
                 PrettyOutput.print(f"正在上传文件: {file_path}", OutputType.INFO)
                 if platform.upload_files([file_path]):
                     PrettyOutput.print("文件上传成功", OutputType.SUCCESS)
@@ -124,19 +139,26 @@ def chat_with_model(platform_name: str, model_name: str):
             try:
                 file_path = user_input.strip()[5:].strip()
                 if not file_path:
-                    PrettyOutput.print(
+                    PrettyOutput.print(
+                        "请指定保存文件名,例如: /save last_message.txt",
+                        OutputType.WARNING,
+                    )
                     continue
-
+
                 # Remove quotes if present
-                if (file_path.startswith('"') and file_path.endswith('"')) or (
+                if (file_path.startswith('"') and file_path.endswith('"')) or (
+                    file_path.startswith("'") and file_path.endswith("'")
+                ):
                     file_path = file_path[1:-1]
-
+
                 # Write last message content to file
                 if conversation_history:
-                    with open(file_path,
+                    with open(file_path, "w", encoding="utf-8") as f:
                         last_entry = conversation_history[-1]
                         f.write(f"{last_entry['content']}\n")
-                    PrettyOutput.print(
+                    PrettyOutput.print(
+                        f"最后一条消息内容已保存到 {file_path}", OutputType.SUCCESS
+                    )
                 else:
                     PrettyOutput.print("没有可保存的消息", OutputType.WARNING)
             except Exception as e:
@@ -148,19 +170,26 @@ def chat_with_model(platform_name: str, model_name: str):
             try:
                 file_path = user_input.strip()[8:].strip()
                 if not file_path:
-                    PrettyOutput.print(
+                    PrettyOutput.print(
+                        "请指定保存文件名,例如: /saveall all_conversations.txt",
+                        OutputType.WARNING,
+                    )
                     continue
-
+
                 # Remove quotes if present
-                if (file_path.startswith('"') and file_path.endswith('"')) or (
+                if (file_path.startswith('"') and file_path.endswith('"')) or (
+                    file_path.startswith("'") and file_path.endswith("'")
+                ):
                     file_path = file_path[1:-1]
-
+
                 # Write full conversation history to file
-                with open(file_path,
+                with open(file_path, "w", encoding="utf-8") as f:
                     for entry in conversation_history:
                         f.write(f"{entry['role']}: {entry['content']}\n\n")
-
-                PrettyOutput.print(
+
+                PrettyOutput.print(
+                    f"所有对话已保存到 {file_path}", OutputType.SUCCESS
+                )
             except Exception as e:
                 PrettyOutput.print(f"保存所有对话失败: {str(e)}", OutputType.ERROR)
             continue
@@ -170,15 +199,20 @@ def chat_with_model(platform_name: str, model_name: str):
             try:
                 command = user_input.strip()[6:].strip()
                 if not command:
-                    PrettyOutput.print(
+                    PrettyOutput.print(
+                        "请指定要执行的shell命令,例如: /shell ls -l",
+                        OutputType.WARNING,
+                    )
                     continue
-
+
                 PrettyOutput.print(f"执行命令: {command}", OutputType.INFO)
                 return_code = os.system(command)
                 if return_code == 0:
                     PrettyOutput.print("命令执行完成", OutputType.SUCCESS)
                 else:
-                    PrettyOutput.print(
+                    PrettyOutput.print(
+                        f"命令执行失败(返回码: {return_code})", OutputType.ERROR
+                    )
             except Exception as ex:
                 PrettyOutput.print(f"执行命令失败: {str(ex)}", OutputType.ERROR)
             continue
@@ -189,7 +223,9 @@ def chat_with_model(platform_name: str, model_name: str):
             if not response:
                 PrettyOutput.print("没有有效的回复", OutputType.WARNING)
             else:
-                conversation_history.append(
+                conversation_history.append(
+                    {"role": "assistant", "content": response}
+                )  # 记录模型回复
 
         except Exception as e:
             PrettyOutput.print(f"聊天失败: {str(e)}", OutputType.ERROR)
@@ -203,28 +239,36 @@ def chat_with_model(platform_name: str, model_name: str):
         except:
             pass
 
+
 # Helper function for platform and model validation
 def validate_platform_model(args):
     if not args.platform or not args.model:
-        PrettyOutput.print(
+        PrettyOutput.print(
+            "请指定平台和模型。使用 'jarvis info' 查看可用平台和模型。",
+            OutputType.WARNING,
+        )
         return False
     return True
 
+
 def chat_command(args):
     """Process chat subcommand"""
     if not validate_platform_model(args):
         return
     chat_with_model(args.platform, args.model)
 
+
 def info_command(args):
     """Process info subcommand"""
     list_platforms()
 
+
 # New models for OpenAI-compatible API
 class ChatMessage(BaseModel):
     role: str
     content: str
 
+
 class ChatCompletionRequest(BaseModel):
     model: str
     messages: List[ChatMessage]
@@ -232,11 +276,13 @@ class ChatCompletionRequest(BaseModel):
     temperature: Optional[float] = None
     max_tokens: Optional[int] = None
 
+
 class ChatCompletionChoice(BaseModel):
     index: int
     message: ChatMessage
     finish_reason: str = "stop"
 
+
 class ChatCompletionChunk(BaseModel):
     id: str
     object: str = "chat.completion.chunk"
@@ -244,13 +290,87 @@ class ChatCompletionChunk(BaseModel):
     model: str
     choices: List[Dict[str, Any]]
 
+
 class ChatCompletionResponse(BaseModel):
     id: str
     object: str = "chat.completion"
     created: int
     model: str
     choices: List[ChatCompletionChoice]
-    usage: Dict[str, int] = Field(
+    usage: Dict[str, int] = Field(
+        default_factory=lambda: {
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "total_tokens": 0,
+        }
+    )
+
+
+def load_role_config(config_path: str) -> dict:
+    """从YAML文件加载角色配置
+
+    参数:
+        config_path: YAML配置文件的路径
+
+    返回:
+        dict: 角色配置字典
+    """
+    import yaml
+
+    if not os.path.exists(config_path):
+        PrettyOutput.print(f"角色配置文件 {config_path} 不存在", OutputType.ERROR)
+        return {}
+
+    with open(config_path, "r", encoding="utf-8", errors="ignore") as f:
+        try:
+            config = yaml.safe_load(f)
+            return config if config else {}
+        except yaml.YAMLError as e:
+            PrettyOutput.print(f"角色配置文件解析失败: {str(e)}", OutputType.ERROR)
+            return {}
+
+
+def role_command(args):
+    """Process role subcommand - load role config and start chat"""
+    config = load_role_config(args.config)
+    if not config or "roles" not in config:
+        PrettyOutput.print("无效的角色配置文件", OutputType.ERROR)
+        return
+
+    # 显示可选角色列表
+    PrettyOutput.section("可用角色", OutputType.SUCCESS)
+    for i, role in enumerate(config["roles"], 1):
+        PrettyOutput.print(
+            f"{i}. {role['name']} - {role.get('description', '')}", OutputType.INFO
+        )
+
+    # 让用户选择角色
+    try:
+        choice = int(get_multiline_input("请选择角色(输入编号): "))
+        selected_role = config["roles"][choice - 1]
+    except (ValueError, IndexError):
+        PrettyOutput.print("无效的选择", OutputType.ERROR)
+        return
+
+    # 初始化平台和模型
+    platform_name = selected_role["platform"]
+    model_name = selected_role["model"]
+    system_prompt = selected_role.get("system_prompt", "")
+
+    registry = PlatformRegistry.get_global_platform_registry()
+    platform = registry.create_platform(platform_name)
+    if not platform:
+        PrettyOutput.print(f"创建平台 {platform_name} 失败", OutputType.WARNING)
+        return
+
+    platform.set_model_name(model_name)
+    if system_prompt:
+        platform.set_system_prompt(system_prompt)
+
+    # 开始对话
+    PrettyOutput.print(f"已选择角色: {selected_role['name']}", OutputType.SUCCESS)
+    chat_with_model(platform_name, model_name)
+
 
 def service_command(args):
     """Process service subcommand - start OpenAI-compatible API server"""
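
The new role subcommand reads its role definitions from a YAML file (default: ~/.jarvis/roles.yaml). Based on the keys that load_role_config and role_command read above (a top-level roles list whose entries carry name, an optional description, platform, model, and an optional system_prompt), a minimal config might look like this sketch; the platform and model values are illustrative placeholders:

roles:
  - name: Code Assistant
    description: Helps write and review code
    platform: kimi
    model: kimi-latest
    system_prompt: You are a senior software engineer.
  - name: Translator
    platform: openai
    model: gpt-4o
    system_prompt: Translate the user's input into English.
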
@@ -282,11 +402,16 @@ def service_command(args):
 
     registry = PlatformRegistry.get_global_platform_registry()
 
-    PrettyOutput.print(
+    PrettyOutput.print(
+        f"Starting Jarvis API server on {host}:{port}", OutputType.SUCCESS
+    )
     PrettyOutput.print("This server provides an OpenAI-compatible API", OutputType.INFO)
 
     if default_platform and default_model:
-        PrettyOutput.print(
+        PrettyOutput.print(
+            f"Default platform: {default_platform}, model: {default_model}",
+            OutputType.INFO,
+        )
 
     PrettyOutput.print("Available platforms:", OutputType.INFO)
 
@@ -306,7 +431,9 @@ def service_command(args):
         if key not in platform_instances:
             platform = registry.create_platform(platform_name)
             if not platform:
-                raise HTTPException(
+                raise HTTPException(
+                    status_code=400, detail=f"Platform {platform_name} not found"
+                )
 
             platform.set_model_name(model_name)
             platform_instances[key] = platform
@@ -316,7 +443,9 @@ def service_command(args):
     def log_conversation(conversation_id, messages, model, response=None):
         """Log conversation to file in plain text format."""
         timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
-        log_file = os.path.join(
+        log_file = os.path.join(
+            logs_dir, f"conversation_{conversation_id}_{timestamp}.txt"
+        )
 
         with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
             f.write(f"Conversation ID: {conversation_id}\n")
@@ -344,12 +473,14 @@ def service_command(args):
             if models:
                 for model_name, _ in models:
                     full_name = f"{default_platform}/{model_name}"
-                    model_list.append(
-
-
-
-
-
+                    model_list.append(
+                        {
+                            "id": full_name,
+                            "object": "model",
+                            "created": int(time.time()),
+                            "owned_by": default_platform,
+                        }
+                    )
         except Exception as e:
             print(f"Error getting models for {default_platform}: {str(e)}")
 
@@ -396,19 +527,21 @@ def service_command(args):
         # Store messages in chat history
         chat_histories[conversation_id] = {
             "model": model,
-            "messages": [{"role": m.role, "content": m.content} for m in messages]
+            "messages": [{"role": m.role, "content": m.content} for m in messages],
         }
 
         # Log the conversation
-        log_conversation(
-
-
+        log_conversation(
+            conversation_id,
+            [{"role": m.role, "content": m.content} for m in messages],
+            model,
+        )
 
         if stream:
             # Return streaming response
             return StreamingResponse(
                 stream_chat_response(platform, message_text, model),
-                media_type="text/event-stream"
+                media_type="text/event-stream",
             )
         else:
             # Get chat response
@@ -420,16 +553,17 @@ def service_command(args):
 
             # Update chat history with response
             if conversation_id in chat_histories:
-                chat_histories[conversation_id]["messages"].append(
-                    "role": "assistant",
-
-                })
+                chat_histories[conversation_id]["messages"].append(
+                    {"role": "assistant", "content": response_text}
+                )
 
             # Log the conversation with response
-            log_conversation(
-
-
-
+            log_conversation(
+                conversation_id,
+                chat_histories[conversation_id]["messages"],
+                model,
+                response_text,
+            )
 
             return {
                 "id": completion_id,
@@ -439,18 +573,16 @@ def service_command(args):
                 "choices": [
                     {
                         "index": 0,
-                        "message": {
-
-                            "content": response_text
-                        },
-                        "finish_reason": "stop"
+                        "message": {"role": "assistant", "content": response_text},
+                        "finish_reason": "stop",
                     }
                 ],
                 "usage": {
                     "prompt_tokens": len(message_text) // 4,  # Rough estimate
                     "completion_tokens": len(response_text) // 4,  # Rough estimate
-                    "total_tokens": (len(message_text) + len(response_text))
-
+                    "total_tokens": (len(message_text) + len(response_text))
+                    // 4,  # Rough estimate
+                },
             }
         except Exception as e:
             raise HTTPException(status_code=500, detail=str(e))
@@ -473,15 +605,13 @@ def service_command(args):
 
             # 修改第一个yield语句的格式
             initial_data = {
-
-
-
-
-
-
-
-                'finish_reason': None
-            }]
+                "id": completion_id,
+                "object": "chat.completion.chunk",
+                "created": created_time,
+                "model": model_name,
+                "choices": [
+                    {"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}
+                ],
             }
             res = json.dumps(initial_data)
             yield f"data: {res}\n\n"
@@ -498,20 +628,22 @@ def service_command(args):
                 # 分成小块以获得更好的流式体验
                 chunk_size = 4  # 每个块的字符数
                 for i in range(0, len(response), chunk_size):
-                    chunk = response[i:i+chunk_size]
+                    chunk = response[i : i + chunk_size]
                     full_response += chunk
 
                     # 创建并发送块
                     chunk_data = {
-
-
-
-
-
-
-
-
-
+                        "id": completion_id,
+                        "object": "chat.completion.chunk",
+                        "created": created_time,
+                        "model": model_name,
+                        "choices": [
+                            {
+                                "index": 0,
+                                "delta": {"content": chunk},
+                                "finish_reason": None,
+                            }
+                        ],
                     }
 
                     yield f"data: {json.dumps(chunk_data)}\n\n"
@@ -521,30 +653,28 @@ def service_command(args):
             else:
                 # 如果没有输出,发送一个空内容块
                 chunk_data = {
-
-
-
-
-
-
-
-
-
+                    "id": completion_id,
+                    "object": "chat.completion.chunk",
+                    "created": created_time,
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "delta": {"content": "No response from model."},
+                            "finish_reason": None,
+                        }
+                    ],
                 }
                 yield f"data: {json.dumps(chunk_data)}\n\n"
                 full_response = "No response from model."
 
             # 修改最终yield语句的格式
             final_data = {
-
-
-
-
-
-                'index': 0,
-                'delta': {},
-                'finish_reason': 'stop'
-            }]
+                "id": completion_id,
+                "object": "chat.completion.chunk",
+                "created": created_time,
+                "model": model_name,
+                "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
             }
             yield f"data: {json.dumps(final_data)}\n\n"
 
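
Taken together, the reformatted blocks above still emit the standard OpenAI streaming wire format: each chunk dict is serialized with json.dumps and sent as a `data:` line followed by a blank line (Python None becomes JSON null). A short reply would look roughly like this on the wire; the id, timestamp, and model values are placeholders:

data: {"id": "chatcmpl-123", "object": "chat.completion.chunk", "created": 1700000000, "model": "kimi/kimi-latest", "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": null}]}

data: {"id": "chatcmpl-123", "object": "chat.completion.chunk", "created": 1700000000, "model": "kimi/kimi-latest", "choices": [{"index": 0, "delta": {"content": "你好"}, "finish_reason": null}]}

data: {"id": "chatcmpl-123", "object": "chat.completion.chunk", "created": 1700000000, "model": "kimi/kimi-latest", "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]}

In the error path below, the stream is additionally terminated with a literal `data: [DONE]` line.
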
@@ -553,51 +683,61 @@ def service_command(args):
 
             # 记录对话到文件
             timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
-            log_file = os.path.join(
+            log_file = os.path.join(
+                logs_dir, f"stream_conversation_{conversation_id}_{timestamp}.json"
+            )
 
             log_data = {
                 "conversation_id": conversation_id,
                 "timestamp": timestamp,
                 "model": model_name,
                 "message": message,
-                "response": full_response
+                "response": full_response,
             }
 
             with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
                 json.dump(log_data, f, ensure_ascii=False, indent=2)
 
-            PrettyOutput.print(
+            PrettyOutput.print(
+                f"Stream conversation logged to {log_file}", OutputType.INFO
+            )
 
         except Exception as e:
             # 发送错误消息
             error_msg = f"Error: {str(e)}"
             print(f"Streaming error: {error_msg}")
 
-            res = json.dumps(
-
-
-
-
-
-
-
-
-
+            res = json.dumps(
+                {
+                    "id": completion_id,
+                    "object": "chat.completion.chunk",
+                    "created": created_time,
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "delta": {"content": error_msg},
+                            "finish_reason": "stop",
+                        }
+                    ],
+                }
+            )
             yield f"data: {res}\n\n"
             yield f"data: {json.dumps({'error': {'message': error_msg, 'type': 'server_error'}})}\n\n"
             yield "data: [DONE]\n\n"
 
             # 记录错误到文件
             timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
-            log_file = os.path.join(
+            log_file = os.path.join(
+                logs_dir, f"stream_error_{conversation_id}_{timestamp}.json"
+            )
 
             log_data = {
                 "conversation_id": conversation_id,
                 "timestamp": timestamp,
                 "model": model_name,
                 "message": message,
-                "error": error_msg
+                "error": error_msg,
             }
 
             with open(log_file, "w", encoding="utf-8", errors="ignore") as f:
@@ -608,40 +748,58 @@ def service_command(args):
     # Run the server
     uvicorn.run(app, host=host, port=port)
 
+
 def main():
     """Main function"""
     import argparse
 
    init_env("欢迎使用 Jarvis-PlatformManager,您的平台管理助手已准备就绪!")
 
-    parser = argparse.ArgumentParser(description=
-    subparsers = parser.add_subparsers(dest=
+    parser = argparse.ArgumentParser(description="Jarvis AI 平台")
+    subparsers = parser.add_subparsers(dest="command", help="可用子命令")
 
     # info subcommand
-    info_parser = subparsers.add_parser(
+    info_parser = subparsers.add_parser("info", help="显示支持的平台和模型信息")
 
     # chat subcommand
-    chat_parser = subparsers.add_parser(
-    chat_parser.add_argument(
-    chat_parser.add_argument(
+    chat_parser = subparsers.add_parser("chat", help="与指定平台和模型聊天")
+    chat_parser.add_argument("--platform", "-p", help="指定要使用的平台")
+    chat_parser.add_argument("--model", "-m", help="指定要使用的模型")
 
     # service subcommand
-    service_parser = subparsers.add_parser(
-    service_parser.add_argument(
-
-
-    service_parser.add_argument(
+    service_parser = subparsers.add_parser("service", help="启动OpenAI兼容的API服务")
+    service_parser.add_argument(
+        "--host", default="127.0.0.1", help="服务主机地址 (默认: 127.0.0.1)"
+    )
+    service_parser.add_argument(
+        "--port", type=int, default=8000, help="服务端口 (默认: 8000)"
+    )
+    service_parser.add_argument(
+        "--platform", "-p", help="指定默认平台,当客户端未指定平台时使用"
+    )
+    service_parser.add_argument(
+        "--model", "-m", help="指定默认模型,当客户端未指定平台时使用"
+    )
+
+    # role subcommand
+    role_parser = subparsers.add_parser("role", help="加载角色配置文件并开始对话")
+    role_parser.add_argument(
+        "--config", "-c", default="~/.jarvis/roles.yaml", help="角色配置文件路径(YAML格式,默认: ~/.jarvis/roles.yaml)"
+    )
 
     args = parser.parse_args()
 
-    if args.command ==
+    if args.command == "info":
         info_command(args)
-    elif args.command ==
+    elif args.command == "chat":
         chat_command(args)
-    elif args.command ==
+    elif args.command == "service":
         service_command(args)
+    elif args.command == "role":
+        role_command(args)
     else:
         parser.print_help()
 
+
 if __name__ == "__main__":
-    main()
+    main()