jarvis-ai-assistant 0.1.174__py3-none-any.whl → 0.1.176__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jarvis-ai-assistant might be problematic.

jarvis/__init__.py CHANGED
@@ -1,4 +1,4 @@
  # -*- coding: utf-8 -*-
  """Jarvis AI Assistant"""
 
- __version__ = "0.1.174"
+ __version__ = "0.1.176"
@@ -11,7 +11,7 @@ from jarvis.jarvis_platform.registry import PlatformRegistry
  from jarvis.jarvis_utils.output import PrettyOutput, OutputType
  from jarvis.jarvis_utils.embedding import get_context_token_count
  from jarvis.jarvis_utils.config import get_max_tool_call_count, is_auto_complete, is_execute_tool_confirm
- from jarvis.jarvis_utils.methodology import load_methodology
+ from jarvis.jarvis_utils.methodology import load_methodology, upload_methodology
  from jarvis.jarvis_utils.globals import make_agent_name, set_agent, delete_agent
  from jarvis.jarvis_utils.input import get_multiline_input
  from jarvis.jarvis_utils.config import get_max_token_count
@@ -337,33 +337,20 @@ class Agent:
  action_handlers = '\n'.join([f'- {handler.name()}' for handler in self.output_handler])
 
  # 任务完成提示
- complete_prompt = f"3. 输出{ot('!!!COMPLETE!!!')}" if need_complete and self.auto_complete else ""
+ complete_prompt = f"- 输出{ot('!!!COMPLETE!!!')}" if need_complete and self.auto_complete else ""
 
  addon_prompt = f"""
- <addon>
- <instructions>
- **系统指令:**
- - 每次响应必须且只能包含一个操作
- - 严格遵循操作调用格式
- - 必须包含参数和说明
- - 操作结束需等待结果
- - 如果判断任务已经完成,不必输出操作
+ 请判断是否已经完成任务,如果已经完成:
+ - 说明完成原因,不需要再有新的操作
+ {complete_prompt}
+ 如果没有完成,请进行下一步操作:
+ - 仅包含一个操作
  - 如果信息不明确,请请求用户补充
  - 如果执行过程中连续失败5次,请使用ask_user询问用户操作
- </instructions>
-
- <actions>
- **可用操作列表:**
+ - 操作列表:
  {action_handlers}
- </actions>
 
- <completion>
- 如果任务已完成,请:
- 1. 说明完成原因
- 2. 保持输出格式规范
- {complete_prompt}
- </completion>
- </addon>
+ 请继续。
  """
 
  return addon_prompt
@@ -401,8 +388,6 @@ class Agent:
  if self.conversation_length > self.max_token_count:
  message = self._summarize_and_clear_history() + "\n\n" + message
  self.conversation_length += get_context_token_count(message)
-
- print("🤖 模型思考:")
  return self.model.chat_until_success(message) # type: ignore
 
 
@@ -547,7 +532,7 @@ class Agent:
  1. 首先检查现有工具或方法论是否已经可以完成该任务,如果可以,直接说明即可,无需生成新内容
  2. 如果现有工具/方法论不足,评估当前任务是否可以通过编写新工具来自动化解决
  3. 如果可以通过工具解决,请设计并提供工具代码
- 4. 如果无法通过编写通用工具完成,评估是否可以总结为通用方法论
+ 4. 如果无法通过编写通用工具完成,评估当前的执行流程是否可以总结为通用方法论
  5. 如果以上都不可行,给出详细理由
 
  请根据分析结果采取相应行动:说明现有工具/方法论、创建新工具、生成新方法论或说明原因。
@@ -570,6 +555,7 @@ class Agent:
  2. 方法论应该具备足够的通用性,可应用于同类问题
  3. 特别注意用户在执行过程中提供的修正、反馈和改进建议
  4. 如果用户明确指出了某个解决步骤的优化方向,这应该被纳入方法论
+ 5. 方法论要严格按照实际的执行流程来总结,不要遗漏或增加任何步骤
  </evaluation_criteria>
 
  <tool_requirements>
@@ -645,7 +631,7 @@ want: 创建新工具来解决XXX问题
  name: generate_new_tool
  arguments:
  tool_name: 工具名称
- tool_code: |
+ tool_code: |2
  # -*- coding: utf-8 -*-
  from typing import Dict, Any
  from jarvis.jarvis_utils.output import PrettyOutput, OutputType
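
A note on the recurring `|` → `|2` change in these templates: `|2` is a YAML literal block scalar with an explicit indentation indicator. A minimal sketch, using only standard YAML and PyYAML (this snippet is illustrative, not code from the package): with a plain `|` the block indentation is inferred from the first non-empty line, so a first line indented deeper than the lines that follow can push the rest of the block out of the scalar, while `|2` pins the content indentation explicitly.

```python
import yaml  # PyYAML

# Minimal sketch, not from the package: the block's first line is indented
# deeper than the second. With a plain "|" the detected indentation would be
# taken from the first line and the second line would no longer parse as part
# of the block; "|2" fixes the content indentation at two spaces instead.
doc = """\
content: |2
    # heading kept with two leading spaces
  line at the block's base indentation
"""
# Prints the block with the first line keeping its two extra leading spaces.
print(yaml.safe_load(doc)["content"])
```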
@@ -699,96 +685,12 @@ name: methodology
  arguments:
  operation: add/update
  problem_type: 方法论类型,不要过于细节,也不要过于泛化
- content: |
+ content: |2
  方法论内容
  {ct("TOOL_CALL")}
 
  如果以上三种情况都不适用,则直接输出原因分析,不要使用工具调用格式。
  </output_requirements>
-
- <tool_example>
- 以下是一个完整的工具示例,供参考:
-
- ```python
- # -*- coding: utf-8 -*-
- from typing import Dict, Any
- from jarvis.jarvis_utils.output import PrettyOutput, OutputType
-
- class text_transformer:
- name = "text_transformer"
- description = "Tool for text transformation"
- Tool description for text transformation
- 适用场景:1. 格式化文本; 2. 处理标题; 3. 标准化输出
- \"\"\"
-
- parameters = {{
- "type": "object",
- "properties": {{
- "text": {{
- "type": "string",
- "description": "需要转换格式的文本"
- }},
- "transform_type": {{
- "type": "string",
- "description": "转换类型,可选值为 upper(大写)、lower(小写)或 title(首字母大写)",
- "enum": ["upper", "lower", "title"]
- }}
- }},
- "required": ["text", "transform_type"]
- }}
-
- @staticmethod
- def check() -> bool:
- return True
-
- def execute(self, args: Dict[str, Any]) -> Dict[str, Any]:
- try:
- text = args["text"]
- transform_type = args["transform_type"]
-
- # 使用PrettyOutput显示执行过程
- PrettyOutput.print(f"正在将文本转换为 {{transform_type}} 格式...", OutputType.INFO)
-
- if transform_type == "upper":
- result = text.upper()
- PrettyOutput.print("文本已转换为大写", OutputType.SUCCESS)
- elif transform_type == "lower":
- result = text.lower()
- PrettyOutput.print("文本已转换为小写", OutputType.SUCCESS)
- elif transform_type == "title":
- result = text.title()
- PrettyOutput.print("文本已转换为首字母大写", OutputType.SUCCESS)
- else:
- PrettyOutput.print(f"不支持的转换类型: {{transform_type}}", OutputType.ERROR)
- return {{
- "success": False,
- "stdout": "",
- "stderr": f"不支持的转换类型: {{transform_type}}"
- }}
-
- return {{
- "success": True,
- "stdout": result,
- "stderr": ""
- }}
-
- except Exception as e:
- PrettyOutput.print(f"转换失败: {{str(e)}}", OutputType.ERROR)
- return {{
- "success": False,
- "stdout": "",
- "stderr": f"转换失败: {{str(e)}}"
- }}
- ```
-
- 使用方法:
- ```
- name: text_transformer
- arguments:
- text: hello world
- transform_type: upper
- ```
- </tool_example>
  </task_analysis>"""
 
  self.prompt = analysis_prompt
@@ -835,10 +737,18 @@ arguments:
  self.prompt = f"{user_input}"
 
  if self.first:
- msg = user_input
- for handler in self.input_handler:
- msg, _ = handler(msg, self)
- self.prompt = f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(msg, self.get_tool_registry())}"
+
+ # 先尝试上传方法轮
+ platform = self.model if hasattr(self.model, 'upload_files') else None
+ if platform and upload_methodology(platform):
+ self.prompt = f"{user_input}\n\n方法论已上传到平台,请参考平台上的方法论内容"
+ else:
+ msg = user_input
+ for handler in self.input_handler:
+ msg, _ = handler(msg, self)
+ # 上传失败则回退到本地加载
+ self.prompt = f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(msg, self.get_tool_registry())}"
+
  self.first = False
 
  self.conversation_length = get_context_token_count(self.prompt)
@@ -18,7 +18,6 @@ from jarvis.jarvis_agent import (
  )
  from jarvis.jarvis_tools.registry import ToolRegistry
  from jarvis.jarvis_utils.utils import init_env
- from jarvis.jarvis_agent.file_input_handler import file_input_handler
  from jarvis.jarvis_agent.shell_input_handler import shell_input_handler
  from jarvis.jarvis_agent.builtin_input_handler import builtin_input_handler
 
@@ -122,7 +121,10 @@ def main() -> None:
  system_prompt=origin_agent_system_prompt,
  platform=args.platform,
  model_name=args.model,
- input_handler=[file_input_handler, shell_input_handler, builtin_input_handler],
+ input_handler=[
+ shell_input_handler,
+ builtin_input_handler
+ ],
  output_handler=[ToolRegistry()],
  need_summary=False
  )
@@ -16,7 +16,6 @@ from yaspin import yaspin # type: ignore
 
  from jarvis.jarvis_agent import Agent
  from jarvis.jarvis_agent.builtin_input_handler import builtin_input_handler
- from jarvis.jarvis_agent.file_input_handler import file_input_handler
  from jarvis.jarvis_agent.shell_input_handler import shell_input_handler
  from jarvis.jarvis_platform.registry import PlatformRegistry
  from jarvis.jarvis_git_utils.git_commiter import GitCommitTool
@@ -109,7 +108,6 @@ class CodeAgent:
  platform=platform_instance,
  input_handler=[
  shell_input_handler,
- file_input_handler,
  builtin_input_handler
  ],
  need_summary=need_summary
@@ -374,7 +372,8 @@ class CodeAgent:
  final_ret += f"# 应用补丁:\n```diff\n{diff}\n```"
 
  # 修改后的提示逻辑
- addon_prompt = f"如果用户的需求未完成,请继续生成补丁,如果已经完成,请终止,不要输出新的PATCH,不要实现任何超出用户需求外的内容\n"
+ addon_prompt = "如果对应语言有静态检查工具,请使用静态检查工具检查修改的代码,如果本次修改引入了警告和错误,请根据警告和错误信息修复代码\n"
+ addon_prompt += "在引入警告和错误都被修复的前提下,如果用户的需求未完成,请继续修改代码,如果已经完成,请终止,不要实现任何超出用户需求外的内容\n"
  addon_prompt += "如果有任何信息不明确,调用工具获取信息\n"
  addon_prompt += "每次响应必须且只能包含一个操作\n"
 
jarvis/jarvis_dev/main.py CHANGED
@@ -125,7 +125,7 @@ PM_PROMPT = f"""
  ## 消息传递模板
  {ot("SEND_MESSAGE")}
  to: [角色]
- content: |
+ content: |2
  # [任务主题]
 
  ## 背景与目标
@@ -259,7 +259,7 @@ BA_PROMPT = f"""
  ## 消息传递模板
  {ot("SEND_MESSAGE")}
  to: [角色]
- content: |
+ content: |2
  # [需求主题]
 
  ## 背景与目标
@@ -415,7 +415,7 @@ SA_PROMPT = f"""
  ## 消息传递模板
  {ot("SEND_MESSAGE")}
  to: [角色]
- content: |
+ content: |2
  # [架构主题]
 
  ## 背景与目标
@@ -571,7 +571,7 @@ TL_PROMPT = f"""
  ## 消息传递模板
  {ot("SEND_MESSAGE")}
  to: [角色]
- content: |
+ content: |2
  # [技术主题]
 
  ## 背景与目标
@@ -750,7 +750,7 @@ arguments:
  ## 消息传递模板
  {ot("SEND_MESSAGE")}
  to: [角色]
- content: |
+ content: |2
  # [开发主题]
 
  ## 背景与目标
@@ -931,7 +931,7 @@ arguments:
  ## 消息传递模板
  {ot("SEND_MESSAGE")}
  to: [角色]
- content: |
+ content: |2
  # [测试主题]
 
  ## 背景与目标
@@ -95,10 +95,10 @@ def extract_methodology(input_file):
  请按以下格式返回结果:
  <methodologies>
  - problem_type: [问题类型1]
- content: |
+ content: |2
  [多行方法论内容1]
  - problem_type: [问题类型2]
- content: |
+ content: |2
  [多行方法论内容2]
  </methodologies>
 
@@ -192,10 +192,10 @@ def extract_methodology_from_url(url):
  请按以下格式返回结果:
  <methodologies>
  - problem_type: [问题类型1]
- content: |
+ content: |2
  [多行方法论内容1]
  - problem_type: [问题类型2]
- content: |
+ content: |2
  [多行方法论内容2]
  </methodologies>
 
@@ -33,23 +33,18 @@ class MultiAgent(OutputHandler):
  ```
  {ot("SEND_MESSAGE")}
  to: 智能体名称 # 目标智能体名称
- content: |
- # 消息主题
-
- ## 背景信息
- [提供必要的上下文和背景]
-
- ## 具体需求
- [明确表达期望完成的任务]
-
- ## 相关资源
- [列出相关文档、数据或工具]
-
- ## 期望结果
- [描述期望的输出格式和内容]
-
- ## 下一步计划
- [描述下一步的计划和行动]
+ content: |2
+ # 消息主题
+ ## 背景信息
+ [提供必要的上下文和背景]
+ ## 具体需求
+ [明确表达期望完成的任务]
+ ## 相关资源
+ [列出相关文档、数据或工具]
+ ## 期望结果
+ [描述期望的输出格式和内容]
+ ## 下一步计划
+ [描述下一步的计划和行动]
  {ct("SEND_MESSAGE")}
  ```
 
@@ -58,11 +53,10 @@ content: |
  ```
  {ot("SEND_MESSAGE")}
  to: 智能体名称 # 目标智能体名称
- content: |
- # 消息主题
-
- ## 任务结果
- [任务完成结果,用于反馈]
+ content: |2
+ # 消息主题
+ ## 任务结果
+ [任务完成结果,用于反馈]
  {ct("SEND_MESSAGE")}
  ```
 
@@ -1,15 +1,19 @@
  # -*- coding: utf-8 -*-
  from abc import ABC, abstractmethod
  import re
- from typing import List, Tuple
+ from typing import Generator, List, Tuple
 
  from yaspin import yaspin
- from jarvis.jarvis_utils.config import get_max_input_token_count
+
+ from jarvis.jarvis_utils.config import get_max_input_token_count, get_pretty_output
  from jarvis.jarvis_utils.embedding import split_text_into_chunks
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
  from jarvis.jarvis_utils.utils import get_context_token_count, is_context_overflow, while_success, while_true
  from jarvis.jarvis_utils.tag import ot, ct
-
+ from rich.live import Live
+ from rich.text import Text
+ from rich.panel import Panel
+ from rich import box
 
  class BasePlatform(ABC):
  """Base class for large language models"""
@@ -33,7 +37,7 @@ class BasePlatform(ABC):
  self.delete_chat()
 
  @abstractmethod
- def chat(self, message: str) -> str:
+ def chat(self, message: str) -> Generator[str, None, None]:
  """Execute conversation"""
  raise NotImplementedError("chat is not implemented")
 
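
Since `chat` now returns a generator of chunks instead of a complete string, callers drain it and accumulate the pieces. A minimal sketch of that pattern with a made-up `DemoPlatform` standing in for a platform subclass (not code from the package):

```python
from typing import Generator

# Illustrative only; DemoPlatform is a hypothetical stand-in for a
# BasePlatform subclass after the streaming change.
class DemoPlatform:
    def chat(self, message: str) -> Generator[str, None, None]:
        # A real platform would yield fragments as the server streams them.
        for chunk in ("Hello", ", ", "world"):
            yield chunk

def collect(platform: DemoPlatform, message: str) -> str:
    # Mirrors how the caller now drains the generator into one string.
    response = ""
    for chunk in platform.chat(message):
        response += chunk
    return response

print(collect(DemoPlatform(), "hi"))  # -> "Hello, world"
```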
@@ -53,8 +57,6 @@ class BasePlatform(ABC):
  return "错误:输入内容超过最大限制"
 
  if input_token_count > get_max_input_token_count():
- current_suppress_output = self.suppress_output
- self.set_suppress_output(True)
  max_chunk_size = get_max_input_token_count() - 1024 # 留出一些余量
  min_chunk_size = max_chunk_size // 2 # 最小块大小设为最大块大小的一半
  inputs = split_text_into_chunks(message, max_chunk_size, min_chunk_size)
@@ -70,32 +72,48 @@ class BasePlatform(ABC):
  while_true(lambda: while_success(lambda: self.chat(f"<part_content>{input}</part_content>请返回已收到"), 5), 5)
  spinner.text = "提交完成"
  spinner.ok("✅")
- self.set_suppress_output(current_suppress_output)
- response = while_true(lambda: while_success(lambda: self.chat("内容已经全部提供完毕,请继续"), 5), 5)
-
+ response = while_true(lambda: while_success(lambda: self._chat("内容已经全部提供完毕,请继续"), 5), 5)
  else:
- response = self.chat(message)
-
- end_time = time.time()
- duration = end_time - start_time
- char_count = len(response)
-
- # Calculate token count and tokens per second
- try:
- token_count = get_context_token_count(response)
- tokens_per_second = token_count / duration if duration > 0 else 0
- except Exception as e:
- PrettyOutput.print(f"Tokenization failed: {str(e)}", OutputType.WARNING)
- token_count = 0
- tokens_per_second = 0
-
- # Print statistics
- if not self.suppress_output:
- PrettyOutput.print(
- f"对话完成 - 耗时: {duration:.2f}秒, 输入字符数: {len(message)}, 输入Token数量: {input_token_count}, 输出字符数: {char_count}, 输出Token数量: {token_count}, 每秒Token数量: {tokens_per_second:.2f}",
- OutputType.INFO,
+ response = ""
+
+ text_content = Text()
+ panel = Panel(
+ text_content,
+ title=f"[bold cyan]{self.name()}[/bold cyan]",
+ subtitle="[dim]思考中...[/dim]",
+ border_style="bright_blue",
+ box=box.ROUNDED
  )
 
+ if not self.suppress_output:
+ if get_pretty_output():
+ with Live(panel, refresh_per_second=10, transient=False) as live:
+ for s in self.chat(message):
+ response += s
+ text_content.append(s, style="bright_white")
+ panel.subtitle = "[yellow]正在回答...[/yellow]"
+ live.update(panel)
+ end_time = time.time()
+ duration = end_time - start_time
+ char_count = len(response)
+ # Calculate token count and tokens per second
+ try:
+ token_count = get_context_token_count(response)
+ tokens_per_second = token_count / duration if duration > 0 else 0
+ except Exception as e:
+ PrettyOutput.print(f"Tokenization failed: {str(e)}", OutputType.WARNING)
+ token_count = 0
+ tokens_per_second = 0
+ panel.subtitle = f"[bold green]✓ 对话完成耗时: {duration:.2f}秒, 输入字符数: {len(message)}, 输入Token数量: {input_token_count}, 输出字符数: {char_count}, 输出Token数量: {token_count}, 每秒Token数量: {tokens_per_second:.2f}[/bold green]"
+ live.update(panel)
+ else:
+ for s in self.chat(message):
+ print(s, end="", flush=True)
+ response += s
+ print()
+ else:
+ for s in self.chat(message):
+ response += s
  # Keep original think tag handling
  response = re.sub(ot("think")+r'.*?'+ct("think"), '', response, flags=re.DOTALL)
  return response
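
The pretty-output branch above streams chunks into a rich `Live` panel as the generator is drained. The same display pattern in isolation, as a hedged sketch with hard-coded chunks standing in for the model stream:

```python
from rich import box
from rich.live import Live
from rich.panel import Panel
from rich.text import Text

# Standalone sketch of the streaming display: append each chunk to a Text
# object inside a Panel and refresh the Live view as fragments arrive.
text = Text()
panel = Panel(text, title="model", subtitle="thinking...",
              border_style="bright_blue", box=box.ROUNDED)
with Live(panel, refresh_per_second=10, transient=False) as live:
    for chunk in ("streamed ", "model ", "output"):
        text.append(chunk, style="bright_white")
        panel.subtitle = "answering..."
        live.update(panel)
    panel.subtitle = "done"
    live.update(panel)
```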
@@ -1,5 +1,5 @@
  # -*- coding: utf-8 -*-
- from typing import Dict, List, Tuple
+ from typing import Generator, List, Tuple
  import random
  import string
  from jarvis.jarvis_platform.base import BasePlatform
@@ -34,7 +34,7 @@ class HumanPlatform(BasePlatform):
  else:
  PrettyOutput.print(f"错误:不支持的模型: {model_name}", OutputType.ERROR)
 
- def chat(self, message: str) -> str:
+ def chat(self, message: str) -> Generator[str, None, None]:
  """发送消息并获取人类响应"""
  if not self.conversation_id:
  self.conversation_id = ''.join(random.choices(string.ascii_letters + string.digits, k=8))
@@ -49,7 +49,8 @@ class HumanPlatform(BasePlatform):
  prompt = f"{message} {session_info}\n\n请回复:"
 
  response = get_multiline_input(prompt)
- return response
+ yield response
+ return None
 
  def upload_files(self, file_list: List[str]) -> bool:
  """文件上传功能,人类平台不需要实际处理"""
@@ -1,10 +1,14 @@
  # -*- coding: utf-8 -*-
- from typing import Dict, List, Tuple
+ from typing import Dict, Generator, List, Tuple
  import requests # type: ignore
  import json
  import os
  import mimetypes
  import time
+ from rich.live import Live
+ from rich.text import Text
+ from rich.panel import Panel
+ from rich import box
  from jarvis.jarvis_platform.base import BasePlatform
  from jarvis.jarvis_utils.output import OutputType, PrettyOutput
  from jarvis.jarvis_utils.utils import while_success
@@ -237,7 +241,7 @@ class KimiModel(BasePlatform):
  return True
 
 
- def chat(self, message: str) -> str:
+ def chat(self, message: str) -> Generator[str, None, None]:
  """Send message and get response"""
  if not self.chat_id:
  if not self._create_chat():
@@ -275,12 +279,7 @@ class KimiModel(BasePlatform):
 
  try:
  response = while_success(lambda: requests.post(url, headers=headers, json=payload, stream=True), sleep_time=5)
- full_response = ""
-
- # 收集搜索和引用结果
- search_results = []
- ref_sources = []
-
+ # 如果禁止输出,则静默处理
  for line in response.iter_lines():
  if not line:
  continue
@@ -297,89 +296,12 @@ class KimiModel(BasePlatform):
  # 处理补全文本
  text = data.get("text", "")
  if text:
- if not self.suppress_output:
- PrettyOutput.print_stream(text)
- full_response += text
-
- elif event == "search_plus":
- # 收集搜索结果
- msg = data.get("msg", {})
- if msg.get("type") == "get_res":
- search_results.append({
- "date": msg.get("date", ""),
- "site_name": msg.get("site_name", ""),
- "snippet": msg.get("snippet", ""),
- "title": msg.get("title", ""),
- "type": msg.get("type", ""),
- "url": msg.get("url", "")
- })
-
- elif event == "ref_docs":
- # 收集引用来源
- ref_cards = data.get("ref_cards", [])
- for card in ref_cards:
- ref_sources.append({
- "idx_s": card.get("idx_s", ""),
- "idx_z": card.get("idx_z", ""),
- "ref_id": card.get("ref_id", ""),
- "url": card.get("url", ""),
- "title": card.get("title", ""),
- "abstract": card.get("abstract", ""),
- "source": card.get("source_label", ""),
- "rag_segments": card.get("rag_segments", []),
- "origin": card.get("origin", {})
- })
-
+ yield text
  except json.JSONDecodeError:
  continue
 
- if not self.suppress_output:
- PrettyOutput.print_stream_end()
-
-
- # 显示搜索结果摘要
- if search_results and not self.suppress_output:
- output = ["搜索结果:"]
- for result in search_results:
- output.append(f"- {result['title']}")
- if result['date']:
- output.append(f" 日期: {result['date']}")
- output.append(f" 来源: {result['site_name']}")
- if result['snippet']:
- output.append(f" 摘要: {result['snippet']}")
- output.append(f" 链接: {result['url']}")
- output.append("")
- PrettyOutput.print("\n".join(output), OutputType.PROGRESS)
-
- # 显示引用来源
- if ref_sources and not self.suppress_output:
- output = ["引用来源:"]
- for source in ref_sources:
- output.append(f"- [{source['ref_id']}] {source['title']} ({source['source']})")
- output.append(f" 链接: {source['url']}")
- if source['abstract']:
- output.append(f" 摘要: {source['abstract']}")
-
- # 显示相关段落
- if source['rag_segments']:
- output.append(" 相关段落:")
- for segment in source['rag_segments']:
- text = segment.get('text', '').replace('\n', ' ').strip()
- if text:
- output.append(f" - {text}")
-
- # 显示原文引用
- origin = source['origin']
- if origin:
- text = origin.get('text', '')
- if text:
- output.append(f" 原文: {text}")
-
- output.append("")
-
- PrettyOutput.print("\n".join(output), OutputType.PROGRESS)
 
- return full_response
+ return None
 
  except Exception as e:
  raise Exception(f"Chat failed: {str(e)}")