jarvis-ai-assistant 0.1.173__py3-none-any.whl → 0.1.175__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of jarvis-ai-assistant might be problematic.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +13 -88
- jarvis/jarvis_code_agent/code_agent.py +2 -1
- jarvis/jarvis_dev/main.py +6 -6
- jarvis/jarvis_methodology/main.py +4 -4
- jarvis/jarvis_multi_agent/__init__.py +16 -22
- jarvis/jarvis_platform/base.py +4 -3
- jarvis/jarvis_platform/kimi.py +124 -54
- jarvis/jarvis_platform/openai.py +29 -8
- jarvis/jarvis_platform/yuanbao.py +79 -37
- jarvis/jarvis_platform_manager/main.py +52 -1
- jarvis/jarvis_tools/cli/main.py +31 -12
- jarvis/jarvis_tools/registry.py +8 -8
- jarvis/jarvis_utils/config.py +1 -1
- jarvis/jarvis_utils/embedding.py +49 -108
- jarvis/jarvis_utils/methodology.py +37 -0
- jarvis/jarvis_utils/output.py +1 -20
- {jarvis_ai_assistant-0.1.173.dist-info → jarvis_ai_assistant-0.1.175.dist-info}/METADATA +1 -1
- {jarvis_ai_assistant-0.1.173.dist-info → jarvis_ai_assistant-0.1.175.dist-info}/RECORD +23 -23
- {jarvis_ai_assistant-0.1.173.dist-info → jarvis_ai_assistant-0.1.175.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.173.dist-info → jarvis_ai_assistant-0.1.175.dist-info}/entry_points.txt +0 -0
- {jarvis_ai_assistant-0.1.173.dist-info → jarvis_ai_assistant-0.1.175.dist-info}/licenses/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.173.dist-info → jarvis_ai_assistant-0.1.175.dist-info}/top_level.txt +0 -0
jarvis/__init__.py
CHANGED
jarvis/jarvis_agent/__init__.py
CHANGED
@@ -11,7 +11,7 @@ from jarvis.jarvis_platform.registry import PlatformRegistry
 from jarvis.jarvis_utils.output import PrettyOutput, OutputType
 from jarvis.jarvis_utils.embedding import get_context_token_count
 from jarvis.jarvis_utils.config import get_max_tool_call_count, is_auto_complete, is_execute_tool_confirm
-from jarvis.jarvis_utils.methodology import load_methodology
+from jarvis.jarvis_utils.methodology import load_methodology, upload_methodology
 from jarvis.jarvis_utils.globals import make_agent_name, set_agent, delete_agent
 from jarvis.jarvis_utils.input import get_multiline_input
 from jarvis.jarvis_utils.config import get_max_token_count

@@ -645,7 +645,7 @@ want: 创建新工具来解决XXX问题
 name: generate_new_tool
 arguments:
   tool_name: 工具名称
-  tool_code: |
+  tool_code: |2
     # -*- coding: utf-8 -*-
     from typing import Dict, Any
     from jarvis.jarvis_utils.output import PrettyOutput, OutputType

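The recurring `|` → `|2` edits in this hunk and in the hunks below switch YAML literal block scalars to an explicit indentation indicator. With a bare `|`, the parser infers the block's base indentation from its first non-empty line, so a first line that is deliberately indented deeper (for example an indented Markdown heading inside a prompt template) loses its leading spaces; `|2` pins the base indentation at two columns and preserves them. A minimal sketch of the difference, assuming PyYAML is installed (the sample strings are illustrative, not taken from the package):

```python
import yaml

auto = "content: |\n    indented first line\n"       # "|"  : base indent inferred (4 spaces here)
explicit = "content: |2\n    indented first line\n"  # "|2" : base indent fixed at 2 spaces

print(repr(yaml.safe_load(auto)["content"]))      # 'indented first line\n'
print(repr(yaml.safe_load(explicit)["content"]))  # '  indented first line\n'  (extra indent kept)
```
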
@@ -699,96 +699,12 @@ name: methodology
 arguments:
   operation: add/update
   problem_type: 方法论类型,不要过于细节,也不要过于泛化
-  content: |
+  content: |2
     方法论内容
 {ct("TOOL_CALL")}

 如果以上三种情况都不适用,则直接输出原因分析,不要使用工具调用格式。
 </output_requirements>
-
-<tool_example>
-以下是一个完整的工具示例,供参考:
-
-```python
-# -*- coding: utf-8 -*-
-from typing import Dict, Any
-from jarvis.jarvis_utils.output import PrettyOutput, OutputType
-
-class text_transformer:
-    name = "text_transformer"
-    description = "Tool for text transformation"
-    Tool description for text transformation
-    适用场景:1. 格式化文本; 2. 处理标题; 3. 标准化输出
-    \"\"\"
-
-    parameters = {{
-        "type": "object",
-        "properties": {{
-            "text": {{
-                "type": "string",
-                "description": "需要转换格式的文本"
-            }},
-            "transform_type": {{
-                "type": "string",
-                "description": "转换类型,可选值为 upper(大写)、lower(小写)或 title(首字母大写)",
-                "enum": ["upper", "lower", "title"]
-            }}
-        }},
-        "required": ["text", "transform_type"]
-    }}
-
-    @staticmethod
-    def check() -> bool:
-        return True
-
-    def execute(self, args: Dict[str, Any]) -> Dict[str, Any]:
-        try:
-            text = args["text"]
-            transform_type = args["transform_type"]
-
-            # 使用PrettyOutput显示执行过程
-            PrettyOutput.print(f"正在将文本转换为 {{transform_type}} 格式...", OutputType.INFO)
-
-            if transform_type == "upper":
-                result = text.upper()
-                PrettyOutput.print("文本已转换为大写", OutputType.SUCCESS)
-            elif transform_type == "lower":
-                result = text.lower()
-                PrettyOutput.print("文本已转换为小写", OutputType.SUCCESS)
-            elif transform_type == "title":
-                result = text.title()
-                PrettyOutput.print("文本已转换为首字母大写", OutputType.SUCCESS)
-            else:
-                PrettyOutput.print(f"不支持的转换类型: {{transform_type}}", OutputType.ERROR)
-                return {{
-                    "success": False,
-                    "stdout": "",
-                    "stderr": f"不支持的转换类型: {{transform_type}}"
-                }}
-
-            return {{
-                "success": True,
-                "stdout": result,
-                "stderr": ""
-            }}
-
-        except Exception as e:
-            PrettyOutput.print(f"转换失败: {{str(e)}}", OutputType.ERROR)
-            return {{
-                "success": False,
-                "stdout": "",
-                "stderr": f"转换失败: {{str(e)}}"
-            }}
-```
-
-使用方法:
-```
-name: text_transformer
-arguments:
-  text: hello world
-  transform_type: upper
-```
-</tool_example>
 </task_analysis>"""

         self.prompt = analysis_prompt

@@ -838,7 +754,16 @@ arguments:
         msg = user_input
         for handler in self.input_handler:
             msg, _ = handler(msg, self)
-
+
+        # 先尝试上传方法轮
+        platform = self.model if hasattr(self.model, 'upload_files') else None
+        if platform and upload_methodology(platform):
+            methodology_prompt = f"{user_input}\n\n方法论已上传到平台,请参考平台上的方法论内容"
+        else:
+            # 上传失败则回退到本地加载
+            methodology_prompt = f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(msg, self.get_tool_registry())}"
+
+        self.prompt = methodology_prompt
         self.first = False

         self.conversation_length = get_context_token_count(self.prompt)

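The hunk above switches the first-round prompt from always inlining `load_methodology` output to an upload-first flow: platforms that expose `upload_files` get the methodology pushed via `upload_methodology`, and only if that fails, or the platform lacks the capability, does the agent fall back to embedding the locally loaded text. A compact sketch of that control flow; the two helper stubs are hypothetical stand-ins, not the package's implementations:

```python
# Illustrative stand-ins for the jarvis.jarvis_utils.methodology helpers.
def upload_methodology(platform) -> bool:   # assumed to return True on success
    return True

def load_methodology(msg: str, tool_registry) -> str:
    return "past experience for similar problems"

def first_round_prompt(model, user_input: str, msg: str, tool_registry) -> str:
    # Capability probe: only models that can upload files take the upload path.
    platform = model if hasattr(model, "upload_files") else None
    if platform and upload_methodology(platform):
        return f"{user_input}\n\nMethodology uploaded; refer to it on the platform."
    # Fallback: inline the locally loaded methodology into the prompt.
    return f"{user_input}\n\nPast experience:\n{load_methodology(msg, tool_registry)}"
```
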
jarvis/jarvis_code_agent/code_agent.py
CHANGED

@@ -374,7 +374,8 @@ class CodeAgent:
         final_ret += f"# 应用补丁:\n```diff\n{diff}\n```"

         # 修改后的提示逻辑
-        addon_prompt =
+        addon_prompt = "如果对应语言有静态检查工具,请使用静态检查工具检查代码,如果本次修改引入了警告和错误,请根据警告和错误信息修复代码\n"
+        addon_prompt += "在引入警告和错误都被修复的前提下,如果用户的需求未完成,请继续生成补丁,如果已经完成,请终止,不要实现任何超出用户需求外的内容\n"
         addon_prompt += "如果有任何信息不明确,调用工具获取信息\n"
         addon_prompt += "每次响应必须且只能包含一个操作\n"

jarvis/jarvis_dev/main.py
CHANGED
@@ -125,7 +125,7 @@ PM_PROMPT = f"""
 ## 消息传递模板
 {ot("SEND_MESSAGE")}
 to: [角色]
-content: |
+content: |2
   # [任务主题]

   ## 背景与目标

@@ -259,7 +259,7 @@ BA_PROMPT = f"""
 ## 消息传递模板
 {ot("SEND_MESSAGE")}
 to: [角色]
-content: |
+content: |2
   # [需求主题]

   ## 背景与目标

@@ -415,7 +415,7 @@ SA_PROMPT = f"""
 ## 消息传递模板
 {ot("SEND_MESSAGE")}
 to: [角色]
-content: |
+content: |2
   # [架构主题]

   ## 背景与目标

@@ -571,7 +571,7 @@ TL_PROMPT = f"""
 ## 消息传递模板
 {ot("SEND_MESSAGE")}
 to: [角色]
-content: |
+content: |2
   # [技术主题]

   ## 背景与目标

@@ -750,7 +750,7 @@ arguments:
 ## 消息传递模板
 {ot("SEND_MESSAGE")}
 to: [角色]
-content: |
+content: |2
   # [开发主题]

   ## 背景与目标

@@ -931,7 +931,7 @@ arguments:
 ## 消息传递模板
 {ot("SEND_MESSAGE")}
 to: [角色]
-content: |
+content: |2
   # [测试主题]

   ## 背景与目标

jarvis/jarvis_methodology/main.py
CHANGED

@@ -95,10 +95,10 @@ def extract_methodology(input_file):
 请按以下格式返回结果:
 <methodologies>
 - problem_type: [问题类型1]
-  content: |
+  content: |2
     [多行方法论内容1]
 - problem_type: [问题类型2]
-  content: |
+  content: |2
     [多行方法论内容2]
 </methodologies>

@@ -192,10 +192,10 @@ def extract_methodology_from_url(url):
 请按以下格式返回结果:
 <methodologies>
 - problem_type: [问题类型1]
-  content: |
+  content: |2
     [多行方法论内容1]
 - problem_type: [问题类型2]
-  content: |
+  content: |2
     [多行方法论内容2]
 </methodologies>

jarvis/jarvis_multi_agent/__init__.py
CHANGED

@@ -33,23 +33,18 @@ class MultiAgent(OutputHandler):
 ```
 {ot("SEND_MESSAGE")}
 to: 智能体名称 # 目标智能体名称
-content: |
-
-
-
-
-
-
-
-
-
-
-
-## 期望结果
-[描述期望的输出格式和内容]
-
-## 下一步计划
-[描述下一步的计划和行动]
+content: |2
+  # 消息主题
+  ## 背景信息
+  [提供必要的上下文和背景]
+  ## 具体需求
+  [明确表达期望完成的任务]
+  ## 相关资源
+  [列出相关文档、数据或工具]
+  ## 期望结果
+  [描述期望的输出格式和内容]
+  ## 下一步计划
+  [描述下一步的计划和行动]
 {ct("SEND_MESSAGE")}
 ```

@@ -58,11 +53,10 @@ content: |
 ```
 {ot("SEND_MESSAGE")}
 to: 智能体名称 # 目标智能体名称
-content: |
-
-
-
-[任务完成结果,用于反馈]
+content: |2
+  # 消息主题
+  ## 任务结果
+  [任务完成结果,用于反馈]
 {ct("SEND_MESSAGE")}
 ```

jarvis/jarvis_platform/base.py
CHANGED
@@ -48,7 +48,6 @@ class BasePlatform(ABC):
         start_time = time.time()

         input_token_count = get_context_token_count(message)
-
         if is_context_overflow(message):
             PrettyOutput.print("错误:输入内容超过最大限制", OutputType.WARNING)
             return "错误:输入内容超过最大限制"

@@ -56,10 +55,12 @@ class BasePlatform(ABC):
         if input_token_count > get_max_input_token_count():
             current_suppress_output = self.suppress_output
             self.set_suppress_output(True)
-
+            max_chunk_size = get_max_input_token_count() - 1024  # 留出一些余量
+            min_chunk_size = max_chunk_size // 2  # 最小块大小设为最大块大小的一半
+            inputs = split_text_into_chunks(message, max_chunk_size, min_chunk_size)
             with yaspin(text="正在提交长上下文...", color="cyan") as spinner:
                 prefix_prompt = f"""
-
+我将分多次提供大量内容,在我明确告诉你内容已经全部提供完毕之前,每次仅需要输出"已收到",明白请输出"开始接收输入"。
 """
                 while_true(lambda: while_success(lambda: self.chat(prefix_prompt), 5), 5)
                 submit_count = 0

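The new code budgets a chunk size from `get_max_input_token_count()` minus a small safety margin and hands the oversized message to `split_text_into_chunks(message, max_chunk_size, min_chunk_size)` before feeding the pieces to the model one by one. The diff does not show that helper, so the sketch below is only one plausible reading of its contract: chunks never exceed `max_chunk_size`, and splits prefer line boundaries once `min_chunk_size` is reached. It is an assumption, not the package's implementation, and the real helper presumably measures tokens rather than characters.

```python
from typing import List

def split_text_into_chunks(text: str, max_chunk_size: int, min_chunk_size: int) -> List[str]:
    """Assumed contract: each chunk is at most max_chunk_size units long,
    splitting at newlines once a chunk has grown past min_chunk_size."""
    chunks: List[str] = []
    current = ""
    for line in text.splitlines(keepends=True):
        if len(current) + len(line) > max_chunk_size and len(current) >= min_chunk_size:
            chunks.append(current)
            current = ""
        current += line
        # A single line longer than max_chunk_size is split unconditionally.
        while len(current) > max_chunk_size:
            chunks.append(current[:max_chunk_size])
            current = current[max_chunk_size:]
    if current:
        chunks.append(current)
    return chunks
```
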
jarvis/jarvis_platform/kimi.py
CHANGED
@@ -5,6 +5,10 @@ import json
 import os
 import mimetypes
 import time
+from rich.live import Live
+from rich.text import Text
+from rich.panel import Panel
+from rich import box
 from jarvis.jarvis_platform.base import BasePlatform
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput
 from jarvis.jarvis_utils.utils import while_success

@@ -281,61 +285,127 @@ class KimiModel(BasePlatform):
         search_results = []
         ref_sources = []

-            if not line:
-                continue
-
-            line = line.decode('utf-8')
-            if not line.startswith("data: "):
-                continue
-
-            try:
-                data = json.loads(line[6:])
-                event = data.get("event")
-
-                if event == "cmpl":
-                    # 处理补全文本
-                    text = data.get("text", "")
-                    if text:
-                        if not self.suppress_output:
-                            PrettyOutput.print_stream(text)
-                        full_response += text
-
-                elif event == "search_plus":
-                    # 收集搜索结果
-                    msg = data.get("msg", {})
-                    if msg.get("type") == "get_res":
-                        search_results.append({
-                            "date": msg.get("date", ""),
-                            "site_name": msg.get("site_name", ""),
-                            "snippet": msg.get("snippet", ""),
-                            "title": msg.get("title", ""),
-                            "type": msg.get("type", ""),
-                            "url": msg.get("url", "")
-                        })
-
-                elif event == "ref_docs":
-                    # 收集引用来源
-                    ref_cards = data.get("ref_cards", [])
-                    for card in ref_cards:
-                        ref_sources.append({
-                            "idx_s": card.get("idx_s", ""),
-                            "idx_z": card.get("idx_z", ""),
-                            "ref_id": card.get("ref_id", ""),
-                            "url": card.get("url", ""),
-                            "title": card.get("title", ""),
-                            "abstract": card.get("abstract", ""),
-                            "source": card.get("source_label", ""),
-                            "rag_segments": card.get("rag_segments", []),
-                            "origin": card.get("origin", {})
-                        })
-
-            except json.JSONDecodeError:
-                continue
-
+        # 使用Rich的Live组件来实时展示更新
         if not self.suppress_output:
-
-
+            text_content = Text()
+            panel = Panel(text_content,
+                          title=f"[bold magenta]{self.model_name}[/bold magenta]",
+                          subtitle="思考中...",
+                          border_style="magenta",
+                          box=box.ROUNDED)
+
+            with Live(panel, refresh_per_second=3, transient=False) as live:
+                for line in response.iter_lines():
+                    if not line:
+                        continue
+
+                    line = line.decode('utf-8')
+                    if not line.startswith("data: "):
+                        continue
+
+                    try:
+                        data = json.loads(line[6:])
+                        event = data.get("event")
+
+                        if event == "cmpl":
+                            # 处理补全文本
+                            text = data.get("text", "")
+                            if text:
+                                full_response += text
+                                text_content.append(text)
+                                panel.subtitle = "生成中..."
+                                live.update(panel)
+
+                        elif event == "search_plus":
+                            # 收集搜索结果
+                            msg = data.get("msg", {})
+                            if msg.get("type") == "get_res":
+                                search_results.append({
+                                    "date": msg.get("date", ""),
+                                    "site_name": msg.get("site_name", ""),
+                                    "snippet": msg.get("snippet", ""),
+                                    "title": msg.get("title", ""),
+                                    "type": msg.get("type", ""),
+                                    "url": msg.get("url", "")
+                                })
+                                panel.subtitle = f"搜索中: 找到 {len(search_results)} 个结果"
+                                live.update(panel)
+
+                        elif event == "ref_docs":
+                            # 收集引用来源
+                            ref_cards = data.get("ref_cards", [])
+                            for card in ref_cards:
+                                ref_sources.append({
+                                    "idx_s": card.get("idx_s", ""),
+                                    "idx_z": card.get("idx_z", ""),
+                                    "ref_id": card.get("ref_id", ""),
+                                    "url": card.get("url", ""),
+                                    "title": card.get("title", ""),
+                                    "abstract": card.get("abstract", ""),
+                                    "source": card.get("source_label", ""),
+                                    "rag_segments": card.get("rag_segments", []),
+                                    "origin": card.get("origin", {})
+                                })
+                            panel.subtitle = f"分析引用: 找到 {len(ref_sources)} 个来源"
+                            live.update(panel)
+
+                    except json.JSONDecodeError:
+                        continue
+
+                # 显示对话完成状态
+                panel.subtitle = "[bold green]回答完成[/bold green]"
+                live.update(panel)
+        else:
+            # 如果禁止输出,则静默处理
+            for line in response.iter_lines():
+                if not line:
+                    continue
+
+                line = line.decode('utf-8')
+                if not line.startswith("data: "):
+                    continue
+
+                try:
+                    data = json.loads(line[6:])
+                    event = data.get("event")
+
+                    if event == "cmpl":
+                        # 处理补全文本
+                        text = data.get("text", "")
+                        if text:
+                            full_response += text
+
+                    elif event == "search_plus":
+                        # 收集搜索结果
+                        msg = data.get("msg", {})
+                        if msg.get("type") == "get_res":
+                            search_results.append({
+                                "date": msg.get("date", ""),
+                                "site_name": msg.get("site_name", ""),
+                                "snippet": msg.get("snippet", ""),
+                                "title": msg.get("title", ""),
+                                "type": msg.get("type", ""),
+                                "url": msg.get("url", "")
+                            })
+
+                    elif event == "ref_docs":
+                        # 收集引用来源
+                        ref_cards = data.get("ref_cards", [])
+                        for card in ref_cards:
+                            ref_sources.append({
+                                "idx_s": card.get("idx_s", ""),
+                                "idx_z": card.get("idx_z", ""),
+                                "ref_id": card.get("ref_id", ""),
+                                "url": card.get("url", ""),
+                                "title": card.get("title", ""),
+                                "abstract": card.get("abstract", ""),
+                                "source": card.get("source_label", ""),
+                                "rag_segments": card.get("rag_segments", []),
+                                "origin": card.get("origin", {})
+                            })
+
+                except json.JSONDecodeError:
+                    continue

         # 显示搜索结果摘要
         if search_results and not self.suppress_output:

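Both the Kimi and OpenAI drivers replace `PrettyOutput.print_stream` with a rich Panel that is repainted by Live as tokens arrive. A minimal, self-contained sketch of that display pattern (the fake token stream, title, and delay are illustrative, not taken from the package):

```python
import time
from rich import box
from rich.live import Live
from rich.panel import Panel
from rich.text import Text

def stream_into_panel(chunks, title: str = "model") -> str:
    """Append streamed text into a Panel that Live repaints in place."""
    full_response = ""
    text_content = Text()
    panel = Panel(text_content, title=title, subtitle="streaming...",
                  border_style="cyan", box=box.ROUNDED)
    with Live(panel, refresh_per_second=3, transient=False) as live:
        for chunk in chunks:
            full_response += chunk
            text_content.append(chunk)
            live.update(panel)
            time.sleep(0.1)  # simulate network latency for the demo
        panel.subtitle = "[bold green]done[/bold green]"
        live.update(panel)
    return full_response

if __name__ == "__main__":
    stream_into_panel(["Hello", ", ", "streaming ", "world", "!"], title="demo")
```
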
jarvis/jarvis_platform/openai.py
CHANGED
@@ -2,6 +2,10 @@
 from typing import Dict, List, Tuple
 import os
 from openai import OpenAI
+from rich.live import Live
+from rich.text import Text
+from rich.panel import Panel
+from rich import box
 from jarvis.jarvis_platform.base import BasePlatform
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput

@@ -84,15 +88,32 @@ class OpenAIModel(BasePlatform):

         full_response = ""

-
-            if chunk.choices and chunk.choices[0].delta.content:
-                text = chunk.choices[0].delta.content
-                if not self.suppress_output:
-                    PrettyOutput.print_stream(text)
-                full_response += text
-
+        # 使用Rich的Live组件来实时展示更新
         if not self.suppress_output:
-
+            text_content = Text()
+            panel = Panel(text_content,
+                          title=f"[bold blue]{self.model_name}[/bold blue]",
+                          subtitle="生成中...",
+                          border_style="cyan",
+                          box=box.ROUNDED)
+
+            with Live(panel, refresh_per_second=3, transient=False) as live:
+                for chunk in response:
+                    if chunk.choices and chunk.choices[0].delta.content:
+                        text = chunk.choices[0].delta.content
+                        full_response += text
+                        text_content.append(text)
+                        live.update(panel)
+
+                # 显示对话完成状态
+                panel.subtitle = "[bold green]对话完成[/bold green]"
+                live.update(panel)
+        else:
+            # 如果禁止输出,则静默处理
+            for chunk in response:
+                if chunk.choices and chunk.choices[0].delta.content:
+                    text = chunk.choices[0].delta.content
+                    full_response += text

         # Add assistant reply to history
         self.messages.append({"role": "assistant", "content": full_response})

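For reference, the silent branch follows the standard streaming pattern of the openai Python SDK: request with `stream=True`, then concatenate each chunk's `delta.content`. A minimal sketch, assuming `OPENAI_API_KEY` is set in the environment and using an illustrative model name:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

stream = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative; substitute whatever the deployment provides
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)

full_response = ""
for chunk in stream:
    # Each chunk carries an incremental delta; content may be None on some chunks.
    if chunk.choices and chunk.choices[0].delta.content:
        full_response += chunk.choices[0].delta.content
print(full_response)
```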