jarvis-ai-assistant 0.1.175__py3-none-any.whl → 0.1.176__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of jarvis-ai-assistant might be problematic.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +15 -30
- jarvis/jarvis_agent/jarvis.py +4 -2
- jarvis/jarvis_code_agent/code_agent.py +2 -4
- jarvis/jarvis_platform/base.py +47 -29
- jarvis/jarvis_platform/human.py +4 -3
- jarvis/jarvis_platform/kimi.py +22 -170
- jarvis/jarvis_platform/openai.py +8 -30
- jarvis/jarvis_platform/yuanbao.py +34 -82
- jarvis/jarvis_tools/ask_codebase.py +7 -1
- jarvis/jarvis_utils/config.py +10 -1
- jarvis/jarvis_utils/embedding.py +1 -10
- jarvis/jarvis_utils/output.py +51 -43
- {jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/METADATA +3 -2
- {jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/RECORD +19 -20
- jarvis/jarvis_agent/file_input_handler.py +0 -108
- {jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/entry_points.txt +0 -0
- {jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/licenses/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/top_level.txt +0 -0
jarvis/__init__.py
CHANGED
jarvis/jarvis_agent/__init__.py
CHANGED
@@ -337,33 +337,20 @@ class Agent:
 action_handlers = '\n'.join([f'- {handler.name()}' for handler in self.output_handler])
 
 # 任务完成提示
-complete_prompt = f"
+complete_prompt = f"- 输出{ot('!!!COMPLETE!!!')}" if need_complete and self.auto_complete else ""
 
 addon_prompt = f"""
-
-
-
-
-
-- 必须包含参数和说明
-- 操作结束需等待结果
-- 如果判断任务已经完成,不必输出操作
+请判断是否已经完成任务,如果已经完成:
+- 说明完成原因,不需要再有新的操作
+{complete_prompt}
+如果没有完成,请进行下一步操作:
+- 仅包含一个操作
 - 如果信息不明确,请请求用户补充
 - 如果执行过程中连续失败5次,请使用ask_user询问用户操作
-
-
-<actions>
-**可用操作列表:**
+- 操作列表:
 {action_handlers}
-</actions>
 
-
-如果任务已完成,请:
-1. 说明完成原因
-2. 保持输出格式规范
-{complete_prompt}
-</completion>
-</addon>
+请继续。
 """
 
 return addon_prompt

@@ -401,8 +388,6 @@ class Agent:
 if self.conversation_length > self.max_token_count:
 message = self._summarize_and_clear_history() + "\n\n" + message
 self.conversation_length += get_context_token_count(message)
-
-print("🤖 模型思考:")
 return self.model.chat_until_success(message) # type: ignore
 
 

@@ -547,7 +532,7 @@ class Agent:
 1. 首先检查现有工具或方法论是否已经可以完成该任务,如果可以,直接说明即可,无需生成新内容
 2. 如果现有工具/方法论不足,评估当前任务是否可以通过编写新工具来自动化解决
 3. 如果可以通过工具解决,请设计并提供工具代码
-4.
+4. 如果无法通过编写通用工具完成,评估当前的执行流程是否可以总结为通用方法论
 5. 如果以上都不可行,给出详细理由
 
 请根据分析结果采取相应行动:说明现有工具/方法论、创建新工具、生成新方法论或说明原因。

@@ -570,6 +555,7 @@ class Agent:
 2. 方法论应该具备足够的通用性,可应用于同类问题
 3. 特别注意用户在执行过程中提供的修正、反馈和改进建议
 4. 如果用户明确指出了某个解决步骤的优化方向,这应该被纳入方法论
+5. 方法论要严格按照实际的执行流程来总结,不要遗漏或增加任何步骤
 </evaluation_criteria>
 
 <tool_requirements>

@@ -751,19 +737,18 @@ arguments:
 self.prompt = f"{user_input}"
 
 if self.first:
-msg = user_input
-for handler in self.input_handler:
-msg, _ = handler(msg, self)
 
 # 先尝试上传方法轮
 platform = self.model if hasattr(self.model, 'upload_files') else None
 if platform and upload_methodology(platform):
-
+self.prompt = f"{user_input}\n\n方法论已上传到平台,请参考平台上的方法论内容"
 else:
+msg = user_input
+for handler in self.input_handler:
+msg, _ = handler(msg, self)
 # 上传失败则回退到本地加载
-
+self.prompt = f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(msg, self.get_tool_registry())}"
 
-self.prompt = methodology_prompt
 self.first = False
 
 self.conversation_length = get_context_token_count(self.prompt)
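The last hunk above moves the input handlers into the fallback branch: they only run when the platform-side methodology upload fails. A minimal sketch of that control flow, with hypothetical stand-ins for upload_methodology and load_methodology (not the package's real signatures):

```python
# Sketch of the reworked first-turn flow: try a platform-side upload first, and only
# fall back to running the input handlers and loading the methodology locally.
# upload_methodology/load_methodology here are illustrative stand-ins.
from typing import Callable, List, Tuple


def build_first_prompt(
    user_input: str,
    input_handlers: List[Callable[[str], Tuple[str, bool]]],
    upload_methodology: Callable[[], bool],
    load_methodology: Callable[[str], str],
) -> str:
    if upload_methodology():
        # The platform already holds the methodology; just point the model at it.
        return f"{user_input}\n\n方法论已上传到平台,请参考平台上的方法论内容"
    # Fallback: run the input handlers locally, then inline the methodology text.
    msg = user_input
    for handler in input_handlers:
        msg, _ = handler(msg)
    return f"{user_input}\n\n以下是历史类似问题的执行经验,可参考:\n{load_methodology(msg)}"
```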
jarvis/jarvis_agent/jarvis.py
CHANGED
@@ -18,7 +18,6 @@ from jarvis.jarvis_agent import (
 )
 from jarvis.jarvis_tools.registry import ToolRegistry
 from jarvis.jarvis_utils.utils import init_env
-from jarvis.jarvis_agent.file_input_handler import file_input_handler
 from jarvis.jarvis_agent.shell_input_handler import shell_input_handler
 from jarvis.jarvis_agent.builtin_input_handler import builtin_input_handler
 

@@ -122,7 +121,10 @@ def main() -> None:
 system_prompt=origin_agent_system_prompt,
 platform=args.platform,
 model_name=args.model,
-input_handler=[
+input_handler=[
+shell_input_handler,
+builtin_input_handler
+],
 output_handler=[ToolRegistry()],
 need_summary=False
 )
jarvis/jarvis_code_agent/code_agent.py
CHANGED

@@ -16,7 +16,6 @@ from yaspin import yaspin # type: ignore
 
 from jarvis.jarvis_agent import Agent
 from jarvis.jarvis_agent.builtin_input_handler import builtin_input_handler
-from jarvis.jarvis_agent.file_input_handler import file_input_handler
 from jarvis.jarvis_agent.shell_input_handler import shell_input_handler
 from jarvis.jarvis_platform.registry import PlatformRegistry
 from jarvis.jarvis_git_utils.git_commiter import GitCommitTool

@@ -109,7 +108,6 @@ class CodeAgent:
 platform=platform_instance,
 input_handler=[
 shell_input_handler,
-file_input_handler,
 builtin_input_handler
 ],
 need_summary=need_summary

@@ -374,8 +372,8 @@ class CodeAgent:
 final_ret += f"# 应用补丁:\n```diff\n{diff}\n```"
 
 # 修改后的提示逻辑
-addon_prompt = "
-addon_prompt += "
+addon_prompt = "如果对应语言有静态检查工具,请使用静态检查工具检查修改的代码,如果本次修改引入了警告和错误,请根据警告和错误信息修复代码\n"
+addon_prompt += "在引入警告和错误都被修复的前提下,如果用户的需求未完成,请继续修改代码,如果已经完成,请终止,不要实现任何超出用户需求外的内容\n"
 addon_prompt += "如果有任何信息不明确,调用工具获取信息\n"
 addon_prompt += "每次响应必须且只能包含一个操作\n"
 
jarvis/jarvis_platform/base.py
CHANGED
@@ -1,15 +1,19 @@
 # -*- coding: utf-8 -*-
 from abc import ABC, abstractmethod
 import re
-from typing import List, Tuple
+from typing import Generator, List, Tuple
 
 from yaspin import yaspin
-
+
+from jarvis.jarvis_utils.config import get_max_input_token_count, get_pretty_output
 from jarvis.jarvis_utils.embedding import split_text_into_chunks
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput
 from jarvis.jarvis_utils.utils import get_context_token_count, is_context_overflow, while_success, while_true
 from jarvis.jarvis_utils.tag import ot, ct
-
+from rich.live import Live
+from rich.text import Text
+from rich.panel import Panel
+from rich import box
 
 class BasePlatform(ABC):
 """Base class for large language models"""

@@ -33,7 +37,7 @@ class BasePlatform(ABC):
 self.delete_chat()
 
 @abstractmethod
-def chat(self, message: str) -> str:
+def chat(self, message: str) -> Generator[str, None, None]:
 """Execute conversation"""
 raise NotImplementedError("chat is not implemented")
 

@@ -53,8 +57,6 @@ class BasePlatform(ABC):
 return "错误:输入内容超过最大限制"
 
 if input_token_count > get_max_input_token_count():
-current_suppress_output = self.suppress_output
-self.set_suppress_output(True)
 max_chunk_size = get_max_input_token_count() - 1024 # 留出一些余量
 min_chunk_size = max_chunk_size // 2 # 最小块大小设为最大块大小的一半
 inputs = split_text_into_chunks(message, max_chunk_size, min_chunk_size)

@@ -70,32 +72,48 @@ class BasePlatform(ABC):
 while_true(lambda: while_success(lambda: self.chat(f"<part_content>{input}</part_content>请返回已收到"), 5), 5)
 spinner.text = "提交完成"
 spinner.ok("✅")
-self.
-response = while_true(lambda: while_success(lambda: self.chat("内容已经全部提供完毕,请继续"), 5), 5)
-
+response = while_true(lambda: while_success(lambda: self._chat("内容已经全部提供完毕,请继续"), 5), 5)
 else:
-response =
-
-
-
-
-
-
-
-
-tokens_per_second = token_count / duration if duration > 0 else 0
-except Exception as e:
-PrettyOutput.print(f"Tokenization failed: {str(e)}", OutputType.WARNING)
-token_count = 0
-tokens_per_second = 0
-
-# Print statistics
-if not self.suppress_output:
-PrettyOutput.print(
-f"对话完成 - 耗时: {duration:.2f}秒, 输入字符数: {len(message)}, 输入Token数量: {input_token_count}, 输出字符数: {char_count}, 输出Token数量: {token_count}, 每秒Token数量: {tokens_per_second:.2f}",
-OutputType.INFO,
+response = ""
+
+text_content = Text()
+panel = Panel(
+text_content,
+title=f"[bold cyan]{self.name()}[/bold cyan]",
+subtitle="[dim]思考中...[/dim]",
+border_style="bright_blue",
+box=box.ROUNDED
 )
 
+if not self.suppress_output:
+if get_pretty_output():
+with Live(panel, refresh_per_second=10, transient=False) as live:
+for s in self.chat(message):
+response += s
+text_content.append(s, style="bright_white")
+panel.subtitle = "[yellow]正在回答...[/yellow]"
+live.update(panel)
+end_time = time.time()
+duration = end_time - start_time
+char_count = len(response)
+# Calculate token count and tokens per second
+try:
+token_count = get_context_token_count(response)
+tokens_per_second = token_count / duration if duration > 0 else 0
+except Exception as e:
+PrettyOutput.print(f"Tokenization failed: {str(e)}", OutputType.WARNING)
+token_count = 0
+tokens_per_second = 0
+panel.subtitle = f"[bold green]✓ 对话完成耗时: {duration:.2f}秒, 输入字符数: {len(message)}, 输入Token数量: {input_token_count}, 输出字符数: {char_count}, 输出Token数量: {token_count}, 每秒Token数量: {tokens_per_second:.2f}[/bold green]"
+live.update(panel)
+else:
+for s in self.chat(message):
+print(s, end="", flush=True)
+response += s
+print()
+else:
+for s in self.chat(message):
+response += s
 # Keep original think tag handling
 response = re.sub(ot("think")+r'.*?'+ct("think"), '', response, flags=re.DOTALL)
 return response
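With chat() now declared as a generator, the wrapper shown above accumulates the streamed fragments, optionally renders them in a rich Live panel when pretty output is enabled, and strips the think tags afterwards. A reduced sketch of that consumption pattern, with a dummy generator standing in for a real platform:

```python
# Minimal sketch of consuming a streaming chat() generator the way the new
# BasePlatform wrapper does: accumulate chunks, optionally render them live,
# then post-process the full response. fake_chat is a stand-in, not the real API.
import re
from typing import Generator, Iterable

from rich.live import Live
from rich.panel import Panel
from rich.text import Text


def fake_chat(message: str) -> Generator[str, None, None]:
    for piece in ("<think>planning</think>", "Hello, ", "world!"):
        yield piece


def collect(chunks: Iterable[str], pretty: bool = True) -> str:
    response = ""
    if pretty:
        text = Text()
        panel = Panel(text, title="demo", subtitle="思考中...")
        with Live(panel, refresh_per_second=10) as live:
            for s in chunks:
                response += s
                text.append(s)
                live.update(panel)
    else:
        for s in chunks:
            print(s, end="", flush=True)
            response += s
        print()
    # Same idea as the think-tag stripping at the end of the wrapper in base.py.
    return re.sub(r"<think>.*?</think>", "", response, flags=re.DOTALL)


if __name__ == "__main__":
    print(collect(fake_chat("hi")))
```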
jarvis/jarvis_platform/human.py
CHANGED
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from typing import
+from typing import Generator, List, Tuple
 import random
 import string
 from jarvis.jarvis_platform.base import BasePlatform

@@ -34,7 +34,7 @@ class HumanPlatform(BasePlatform):
 else:
 PrettyOutput.print(f"错误:不支持的模型: {model_name}", OutputType.ERROR)
 
-def chat(self, message: str) -> str:
+def chat(self, message: str) -> Generator[str, None, None]:
 """发送消息并获取人类响应"""
 if not self.conversation_id:
 self.conversation_id = ''.join(random.choices(string.ascii_letters + string.digits, k=8))

@@ -49,7 +49,8 @@ class HumanPlatform(BasePlatform):
 prompt = f"{message} {session_info}\n\n请回复:"
 
 response = get_multiline_input(prompt)
-
+yield response
+return None
 
 def upload_files(self, file_list: List[str]) -> bool:
 """文件上传功能,人类平台不需要实际处理"""
jarvis/jarvis_platform/kimi.py
CHANGED
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from typing import Dict, List, Tuple
+from typing import Dict, Generator, List, Tuple
 import requests # type: ignore
 import json
 import os

@@ -241,7 +241,7 @@ class KimiModel(BasePlatform):
 return True
 
 
-def chat(self, message: str) -> str:
+def chat(self, message: str) -> Generator[str, None, None]:
 """Send message and get response"""
 if not self.chat_id:
 if not self._create_chat():

@@ -279,177 +279,29 @@
 
 try:
 response = while_success(lambda: requests.post(url, headers=headers, json=payload, stream=True), sleep_time=5)
-
-
-
-
-
-
-
-
-
-
-
-
-border_style="magenta",
-box=box.ROUNDED)
-
-with Live(panel, refresh_per_second=3, transient=False) as live:
-for line in response.iter_lines():
-if not line:
-continue
-
-line = line.decode('utf-8')
-if not line.startswith("data: "):
-continue
-
-try:
-data = json.loads(line[6:])
-event = data.get("event")
-
-if event == "cmpl":
-# 处理补全文本
-text = data.get("text", "")
-if text:
-full_response += text
-text_content.append(text)
-panel.subtitle = "生成中..."
-live.update(panel)
-
-elif event == "search_plus":
-# 收集搜索结果
-msg = data.get("msg", {})
-if msg.get("type") == "get_res":
-search_results.append({
-"date": msg.get("date", ""),
-"site_name": msg.get("site_name", ""),
-"snippet": msg.get("snippet", ""),
-"title": msg.get("title", ""),
-"type": msg.get("type", ""),
-"url": msg.get("url", "")
-})
-panel.subtitle = f"搜索中: 找到 {len(search_results)} 个结果"
-live.update(panel)
-
-elif event == "ref_docs":
-# 收集引用来源
-ref_cards = data.get("ref_cards", [])
-for card in ref_cards:
-ref_sources.append({
-"idx_s": card.get("idx_s", ""),
-"idx_z": card.get("idx_z", ""),
-"ref_id": card.get("ref_id", ""),
-"url": card.get("url", ""),
-"title": card.get("title", ""),
-"abstract": card.get("abstract", ""),
-"source": card.get("source_label", ""),
-"rag_segments": card.get("rag_segments", []),
-"origin": card.get("origin", {})
-})
-panel.subtitle = f"分析引用: 找到 {len(ref_sources)} 个来源"
-live.update(panel)
-
-except json.JSONDecodeError:
-continue
-
-# 显示对话完成状态
-panel.subtitle = "[bold green]回答完成[/bold green]"
-live.update(panel)
-else:
-# 如果禁止输出,则静默处理
-for line in response.iter_lines():
-if not line:
-continue
-
-line = line.decode('utf-8')
-if not line.startswith("data: "):
-continue
-
-try:
-data = json.loads(line[6:])
-event = data.get("event")
-
-if event == "cmpl":
-# 处理补全文本
-text = data.get("text", "")
-if text:
-full_response += text
-
-elif event == "search_plus":
-# 收集搜索结果
-msg = data.get("msg", {})
-if msg.get("type") == "get_res":
-search_results.append({
-"date": msg.get("date", ""),
-"site_name": msg.get("site_name", ""),
-"snippet": msg.get("snippet", ""),
-"title": msg.get("title", ""),
-"type": msg.get("type", ""),
-"url": msg.get("url", "")
-})
-
-elif event == "ref_docs":
-# 收集引用来源
-ref_cards = data.get("ref_cards", [])
-for card in ref_cards:
-ref_sources.append({
-"idx_s": card.get("idx_s", ""),
-"idx_z": card.get("idx_z", ""),
-"ref_id": card.get("ref_id", ""),
-"url": card.get("url", ""),
-"title": card.get("title", ""),
-"abstract": card.get("abstract", ""),
-"source": card.get("source_label", ""),
-"rag_segments": card.get("rag_segments", []),
-"origin": card.get("origin", {})
-})
-
-except json.JSONDecodeError:
-continue
-
-# 显示搜索结果摘要
-if search_results and not self.suppress_output:
-output = ["搜索结果:"]
-for result in search_results:
-output.append(f"- {result['title']}")
-if result['date']:
-output.append(f" 日期: {result['date']}")
-output.append(f" 来源: {result['site_name']}")
-if result['snippet']:
-output.append(f" 摘要: {result['snippet']}")
-output.append(f" 链接: {result['url']}")
-output.append("")
-PrettyOutput.print("\n".join(output), OutputType.PROGRESS)
-
-# 显示引用来源
-if ref_sources and not self.suppress_output:
-output = ["引用来源:"]
-for source in ref_sources:
-output.append(f"- [{source['ref_id']}] {source['title']} ({source['source']})")
-output.append(f" 链接: {source['url']}")
-if source['abstract']:
-output.append(f" 摘要: {source['abstract']}")
-
-# 显示相关段落
-if source['rag_segments']:
-output.append(" 相关段落:")
-for segment in source['rag_segments']:
-text = segment.get('text', '').replace('\n', ' ').strip()
-if text:
-output.append(f" - {text}")
-
-# 显示原文引用
-origin = source['origin']
-if origin:
-text = origin.get('text', '')
-if text:
-output.append(f" 原文: {text}")
+# 如果禁止输出,则静默处理
+for line in response.iter_lines():
+if not line:
+continue
+
+line = line.decode('utf-8')
+if not line.startswith("data: "):
+continue
+
+try:
+data = json.loads(line[6:])
+event = data.get("event")
 
-
+if event == "cmpl":
+# 处理补全文本
+text = data.get("text", "")
+if text:
+yield text
+except json.JSONDecodeError:
+continue
 
-PrettyOutput.print("\n".join(output), OutputType.PROGRESS)
 
-return
+return None
 
 except Exception as e:
 raise Exception(f"Chat failed: {str(e)}")
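The rewritten Kimi chat() drops the old rich-panel handling and reduces to a generator that scans SSE lines and yields only the text of "cmpl" events. A self-contained sketch of that parsing pattern, iterating over byte strings instead of a live requests response:

```python
# Sketch of the SSE-parsing generator pattern used by the new kimi.py chat():
# read "data: ..." lines, decode the JSON payload, and yield completion text.
# The byte lines below stand in for requests' response.iter_lines().
import json
from typing import Generator, Iterable


def iter_completion_text(lines: Iterable[bytes]) -> Generator[str, None, None]:
    for raw in lines:
        if not raw:
            continue
        line = raw.decode("utf-8")
        if not line.startswith("data: "):
            continue
        try:
            data = json.loads(line[6:])
        except json.JSONDecodeError:
            continue
        if data.get("event") == "cmpl":
            text = data.get("text", "")
            if text:
                yield text


sample = [b'data: {"event": "cmpl", "text": "Hel"}', b"", b'data: {"event": "cmpl", "text": "lo"}']
assert "".join(iter_completion_text(sample)) == "Hello"
```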
jarvis/jarvis_platform/openai.py
CHANGED
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from typing import Dict, List, Tuple
+from typing import Dict, Generator, List, Tuple
 import os
 from openai import OpenAI
 from rich.live import Live

@@ -73,7 +73,7 @@ class OpenAIModel(BasePlatform):
 self.system_message = message
 self.messages.append({"role": "system", "content": self.system_message})
 
-def chat(self, message: str) -> str:
+def chat(self, message: str) -> Generator[str, None, None]:
 """Execute conversation"""
 try:
 

@@ -87,38 +87,16 @@ class OpenAIModel(BasePlatform):
 ) # type: ignore
 
 full_response = ""
-
-
-
-
-
-title=f"[bold blue]{self.model_name}[/bold blue]",
-subtitle="生成中...",
-border_style="cyan",
-box=box.ROUNDED)
-
-with Live(panel, refresh_per_second=3, transient=False) as live:
-for chunk in response:
-if chunk.choices and chunk.choices[0].delta.content:
-text = chunk.choices[0].delta.content
-full_response += text
-text_content.append(text)
-live.update(panel)
-
-# 显示对话完成状态
-panel.subtitle = "[bold green]对话完成[/bold green]"
-live.update(panel)
-else:
-# 如果禁止输出,则静默处理
-for chunk in response:
-if chunk.choices and chunk.choices[0].delta.content:
-text = chunk.choices[0].delta.content
-full_response += text
+for chunk in response:
+if chunk.choices and chunk.choices[0].delta.content:
+text = chunk.choices[0].delta.content
+full_response += text
+yield text
 
 # Add assistant reply to history
 self.messages.append({"role": "assistant", "content": full_response})
 
-return
+return None
 
 except Exception as e:
 PrettyOutput.print(f"对话失败:{str(e)}", OutputType.ERROR)
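The OpenAI version follows the same shape but also has to keep the message history: each delta is yielded to the caller while the full reply is accumulated and appended to self.messages once the stream ends. A small sketch of that dual bookkeeping, with a plain iterable of strings standing in for the streaming response object:

```python
# Sketch of the streaming pattern in the new openai.py chat(): yield each delta to
# the caller while also accumulating the full reply for the conversation history.
from typing import Dict, Generator, Iterable, List


def stream_and_record(chunks: Iterable[str], history: List[Dict[str, str]]) -> Generator[str, None, None]:
    full_response = ""
    for text in chunks:
        full_response += text
        yield text
    # The assistant turn is only appended after the stream is exhausted.
    history.append({"role": "assistant", "content": full_response})


history: List[Dict[str, str]] = []
print("".join(stream_and_record(iter(["Hi ", "there"]), history)))
print(history)
```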
jarvis/jarvis_platform/yuanbao.py
CHANGED

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from typing import Dict, List, Tuple
+from typing import Dict, Generator, List, Tuple
 import requests
 import json
 import os

@@ -386,7 +386,7 @@ class YuanbaoPlatform(BasePlatform):
 PrettyOutput.print(f"生成签名时出错: {str(e)}", OutputType.ERROR)
 raise e
 
-def chat(self, message: str) -> str:
+def chat(self, message: str) -> Generator[str, None, None]:
 """发送消息并获取响应,可选文件附件
 
 参数:

@@ -452,86 +452,38 @@
 error_msg += f", 响应: {response.text}"
 raise Exception(error_msg)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# 处理思考中的消息
-elif data.get("type") == "think":
-think_content = data.get("content", "")
-if think_content:
-thinking_content = think_content
-panel.subtitle = f"思考中: {thinking_content}"
-live.update(panel)
-
-except json.JSONDecodeError:
-pass
-
-# 检测结束标志
-elif line_str == "data: [DONE]":
-break
-
-# 显示对话完成状态
-panel.subtitle = "[bold green]对话完成[/bold green]"
-live.update(panel)
-else:
-# 如果禁止输出,则静默处理
-for line in response.iter_lines():
-if not line:
-continue
-
-line_str = line.decode('utf-8')
-
-# SSE格式的行通常以"data: "开头
-if line_str.startswith("data: "):
-try:
-data_str = line_str[6:] # 移除"data: "前缀
-data = json.loads(data_str)
-
-# 处理文本类型的消息
-if data.get("type") == "text":
-is_text_block = True
-msg = data.get("msg", "")
-if msg:
-full_response += msg
-
-except json.JSONDecodeError:
-pass
-
-# 检测结束标志
-elif line_str == "data: [DONE]":
-break
-
-return full_response
+# 处理SSE流响应
+for line in response.iter_lines():
+if not line:
+continue
+
+line_str = line.decode('utf-8')
+
+# SSE格式的行通常以"data: "开头
+if line_str.startswith("data: "):
+try:
+data_str = line_str[6:] # 移除"data: "前缀
+data = json.loads(data_str)
+
+# 处理文本类型的消息
+if data.get("type") == "text":
+msg = data.get("msg", "")
+if msg:
+yield msg
+
+# 处理思考中的消息
+elif data.get("type") == "think":
+think_content = data.get("content", "")
+if think_content:
+yield think_content
+
+except json.JSONDecodeError:
+pass
+
+# 检测结束标志
+elif line_str == "data: [DONE]":
+return None
+return None
 
 except Exception as e:
 raise Exception(f"对话失败: {str(e)}")
jarvis/jarvis_tools/ask_codebase.py
CHANGED

@@ -253,20 +253,26 @@ def main():
 ```
 python -m jarvis.jarvis_tools.ask_codebase "登录功能在哪个文件实现?" --root_dir /path/to/codebase
 ```
+如果没有提供问题参数,则会进入交互式多行输入模式
 """
 import argparse
 import sys
+from jarvis.jarvis_utils.input import get_multiline_input
 
 init_env()
 
 # 创建命令行参数解析器
 parser = argparse.ArgumentParser(description="智能代码库查询工具")
-parser.add_argument("question", help="关于代码库的问题")
+parser.add_argument("question", nargs="?", help="关于代码库的问题")
 parser.add_argument("--root_dir", "-d", default=".", help="代码库根目录路径")
 
 # 解析命令行参数
 args = parser.parse_args()
 
+# 如果没有提供问题参数,使用多行输入
+if not args.question:
+args.question = get_multiline_input("请输入关于代码库的问题:")
+
 # 创建并执行工具
 tool = AskCodebaseTool(auto_complete=False)
 result = tool.execute({
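The ask_codebase change makes the positional question optional (nargs="?") and falls back to interactive multiline input when it is omitted. A small argparse sketch of the same pattern, with input() standing in for get_multiline_input:

```python
# Sketch of the optional-positional pattern from ask_codebase.py: nargs="?" lets the
# question be omitted on the command line, and the script then prompts interactively.
# input() stands in for jarvis's get_multiline_input helper.
import argparse


def parse_question(argv=None) -> str:
    parser = argparse.ArgumentParser(description="codebase query demo")
    parser.add_argument("question", nargs="?", help="question about the codebase")
    parser.add_argument("--root_dir", "-d", default=".", help="codebase root directory")
    args = parser.parse_args(argv)
    if not args.question:
        args.question = input("请输入关于代码库的问题:")
    return args.question


if __name__ == "__main__":
    print(parse_question())
```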
jarvis/jarvis_utils/config.py
CHANGED
@@ -160,4 +160,13 @@ def get_max_big_content_size() -> int:
 返回:
 int: 最大大内容大小,默认为1MB
 """
-return int(os.getenv('JARVIS_MAX_BIG_CONTENT_SIZE', '96000'))
+return int(os.getenv('JARVIS_MAX_BIG_CONTENT_SIZE', '96000'))
+
+def get_pretty_output() -> bool:
+"""
+获取是否启用PrettyOutput。
+
+返回:
+bool: 如果启用PrettyOutput则返回True,默认为True
+"""
+return os.getenv('JARVIS_PRETTY_OUTPUT', 'false') == 'true'
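get_pretty_output() follows the same environment-variable pattern as the other helpers in config.py; note that the code defaults to 'false', so pretty output is opt-in even though the docstring says the default is True. A tiny sketch of reading and toggling the flag (the variable name comes from the diff, the rest is illustrative):

```python
# Sketch of the env-var flag pattern behind get_pretty_output(): the option is read
# from the process environment and compared against the literal string 'true'.
import os


def get_pretty_output() -> bool:
    # Mirrors the diff: defaults to 'false', so pretty output is opt-in.
    return os.getenv("JARVIS_PRETTY_OUTPUT", "false") == "true"


os.environ["JARVIS_PRETTY_OUTPUT"] = "true"
assert get_pretty_output() is True
os.environ.pop("JARVIS_PRETTY_OUTPUT")
assert get_pretty_output() is False
```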
jarvis/jarvis_utils/embedding.py
CHANGED
@@ -69,16 +69,7 @@ def split_text_into_chunks(text: str, max_length: int = 512, min_length: int = 5
 
 # 处理最后一个块
 if current_chunk:
-
-chunks.append(current_chunk)
-elif chunks: # 如果最后一个块太短,尝试与前面的块合并
-last_chunk = chunks[-1]
-combined = last_chunk + current_chunk
-combined_tokens = get_context_token_count(combined)
-if combined_tokens <= max_length:
-chunks[-1] = combined
-else:
-chunks.append(current_chunk)
+chunks.append(current_chunk) # 直接添加最后一个块,无论长度如何
 
 return chunks
 
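After this change the final partial chunk is appended as-is instead of being merged into the previous chunk when it is short. A reduced sketch contrasting the new tail handling on toy data, with plain character counts standing in for token counts:

```python
# Sketch of the simplified tail handling in split_text_into_chunks: the last partial
# chunk is now appended unconditionally rather than merged into the previous chunk.
# len() stands in for the real token counting.
from typing import List


def split_fixed(text: str, max_length: int) -> List[str]:
    chunks: List[str] = []
    current = ""
    for ch in text:
        if len(current) + 1 > max_length:
            chunks.append(current)
            current = ""
        current += ch
    if current:
        chunks.append(current)  # append the tail regardless of its length
    return chunks


assert split_fixed("abcdefg", 3) == ["abc", "def", "g"]
```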
jarvis/jarvis_utils/output.py
CHANGED
@@ -17,7 +17,9 @@ from rich.syntax import Syntax
 from rich.style import Style as RichStyle
 from pygments.lexers import guess_lexer
 from pygments.util import ClassNotFound
+from jarvis.jarvis_utils.config import get_pretty_output
 from jarvis.jarvis_utils.globals import console, get_agent_list
+# from rich.box import HEAVY
 class OutputType(Enum):
 """
 输出类型枚举,用于分类和样式化不同类型的消息。

@@ -125,7 +127,7 @@ class PrettyOutput:
 except (ClassNotFound, Exception):
 return default_lang
 @staticmethod
-def _format(output_type: OutputType, timestamp: bool = True) ->
+def _format(output_type: OutputType, timestamp: bool = True) -> str:
 """
 使用时间戳和图标格式化输出头。
 

@@ -136,14 +138,13 @@ class PrettyOutput:
 返回:
 Text: 格式化后的rich Text对象
 """
-
+icon = PrettyOutput._ICONS.get(output_type, "")
+formatted = f"{icon} "
 if timestamp:
-formatted
+formatted+=f"[{datetime.now().strftime('%H:%M:%S')}][{output_type.value}]"
 agent_info = get_agent_list()
 if agent_info:
-formatted
-icon = PrettyOutput._ICONS.get(output_type, "")
-formatted.append(f" {icon} ", style=output_type.value)
+formatted+=f"[{agent_info}]"
 return formatted
 @staticmethod
 def print(text: str, output_type: OutputType, timestamp: bool = True, lang: Optional[str] = None, traceback: bool = False):

@@ -158,35 +159,52 @@ class PrettyOutput:
 traceback: 是否显示错误的回溯信息
 """
 styles = {
-OutputType.SYSTEM:
-OutputType.CODE:
-OutputType.RESULT:
-OutputType.ERROR:
-OutputType.INFO:
-OutputType.PLANNING:
-OutputType.PROGRESS:
-OutputType.SUCCESS:
-OutputType.WARNING:
-OutputType.DEBUG:
-OutputType.USER:
-OutputType.TOOL:
+OutputType.SYSTEM: dict( bgcolor="#1e2b3c"),
+OutputType.CODE: dict( bgcolor="#1c2b1c"),
+OutputType.RESULT: dict( bgcolor="#1c1c2b"),
+OutputType.ERROR: dict( bgcolor="#2b1c1c"),
+OutputType.INFO: dict( bgcolor="#2b2b1c", meta={"icon": "ℹ️"}),
+OutputType.PLANNING: dict( bgcolor="#2b1c2b"),
+OutputType.PROGRESS: dict( bgcolor="#1c1c1c"),
+OutputType.SUCCESS: dict( bgcolor="#1c2b1c"),
+OutputType.WARNING: dict( bgcolor="#2b2b1c"),
+OutputType.DEBUG: dict( bgcolor="#1c1c1c"),
+OutputType.USER: dict( bgcolor="#1c2b2b"),
+OutputType.TOOL: dict( bgcolor="#1c2b2b"),
+}
+
+header_styles = {
+OutputType.SYSTEM: RichStyle(color="bright_cyan", bgcolor="#1e2b3c", frame=True, meta={"icon": "🤖"}),
+OutputType.CODE: RichStyle(color="green", bgcolor="#1c2b1c", frame=True, meta={"icon": "📝"}),
+OutputType.RESULT: RichStyle(color="bright_blue", bgcolor="#1c1c2b", frame=True, meta={"icon": "✨"}),
+OutputType.ERROR: RichStyle(color="red", frame=True, bgcolor="#2b1c1c", meta={"icon": "❌"}),
+OutputType.INFO: RichStyle(color="gold1", frame=True, bgcolor="#2b2b1c", meta={"icon": "ℹ️"}),
+OutputType.PLANNING: RichStyle(color="purple", bold=True, frame=True, bgcolor="#2b1c2b", meta={"icon": "📋"}),
+OutputType.PROGRESS: RichStyle(color="white", encircle=True, frame=True, bgcolor="#1c1c1c", meta={"icon": "⏳"}),
+OutputType.SUCCESS: RichStyle(color="bright_green", bold=True, strike=False, bgcolor="#1c2b1c", meta={"icon": "✅"}),
+OutputType.WARNING: RichStyle(color="yellow", bold=True, blink2=True, bgcolor="#2b2b1c", meta={"icon": "⚠️"}),
+OutputType.DEBUG: RichStyle(color="grey58", dim=True, conceal=True, bgcolor="#1c1c1c", meta={"icon": "🔍"}),
+OutputType.USER: RichStyle(color="spring_green2", frame=True, bgcolor="#1c2b2b", meta={"icon": "👤"}),
+OutputType.TOOL: RichStyle(color="dark_sea_green4", bgcolor="#1c2b2b", frame=True, meta={"icon": "🔧"}),
 }
+
 lang = lang if lang is not None else PrettyOutput._detect_language(text, default_lang='markdown')
-header = PrettyOutput._format(output_type, timestamp)
-content = Syntax(text, lang, theme="
+header = Text(PrettyOutput._format(output_type, timestamp), style=header_styles[output_type])
+content = Syntax(text, lang, theme="dracula", word_wrap=True, background_color=styles[output_type]["bgcolor"])
 panel = Panel(
 content,
-
-border_style=styles[output_type],
+border_style=header_styles[output_type],
 title=header,
 title_align="left",
 padding=(0, 0),
 highlight=True,
-# box=HEAVY,
 )
-
-
-
+if get_pretty_output():
+console.print(panel)
+else:
+console.print(header)
+console.print(content)
+if traceback:
 console.print_exception()
 @staticmethod
 def section(title: str, output_type: OutputType = OutputType.INFO):

@@ -197,23 +215,13 @@ class PrettyOutput:
 title: 章节标题文本
 output_type: 输出类型(影响样式)
 """
+text = Text(title, style=output_type.value, justify="center")
 panel = Panel(
-
+text,
 border_style=output_type.value
 )
-
-
-
-
-
-def _get_style(output_type: OutputType) -> RichStyle:
-"""
-获取预定义的RichStyle用于输出类型。
-
-参数:
-output_type: 要获取样式的输出类型
-
-返回:
-RichStyle: 对应的样式
-"""
-return console.get_style(output_type.value)
+if get_pretty_output():
+console.print(panel)
+else:
+console.print(text)
+
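PrettyOutput.print now builds the panel unconditionally but only renders it when JARVIS_PRETTY_OUTPUT is enabled; otherwise the header and the syntax-highlighted body are printed directly. A stripped-down sketch of that branch, with a local get_pretty_output() assumed to behave like the config helper in the diff:

```python
# Stripped-down sketch of the new PrettyOutput.print branch: wrap the content in a
# Panel when pretty output is enabled, otherwise print header and body plainly.
import os

from rich.console import Console
from rich.panel import Panel
from rich.syntax import Syntax
from rich.text import Text

console = Console()


def get_pretty_output() -> bool:
    return os.getenv("JARVIS_PRETTY_OUTPUT", "false") == "true"


def demo_print(text: str, lang: str = "markdown") -> None:
    header = Text("ℹ️ [INFO]", style="gold1")
    content = Syntax(text, lang, theme="dracula", word_wrap=True)
    if get_pretty_output():
        console.print(Panel(content, title=header, title_align="left", padding=(0, 0)))
    else:
        console.print(header)
        console.print(content)


demo_print("print('hello')", lang="python")
```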
{jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: jarvis-ai-assistant
-Version: 0.1.175
+Version: 0.1.176
 Summary: Jarvis: An AI assistant that uses tools to interact with the system
 Home-page: https://github.com/skyfireitdiy/Jarvis
 Author: skyfire

@@ -213,7 +213,8 @@ OPENAI_API_BASE=https://api.openai.com/v1 # 可选,默认为官方API地址
 | `JARVIS_CONFIRM_BEFORE_APPLY_PATCH` | true | 应用补丁前是否需要确认 |
 | `JARVIS_MAX_TOOL_CALL_COUNT` | 20 | 最大连续工具调用次数 |
 | `JARVIS_AUTO_UPDATE` | true | 是否自动更新Jarvis(仅在以git仓库方式安装时有效) |
-| `JARVIS_MAX_BIG_CONTENT_SIZE` |
+| `JARVIS_MAX_BIG_CONTENT_SIZE` | 96000 | 最大大内容大小 |
+| `JARVIS_PRETTY_OUTPUT` | false | 是否启用PrettyOutput |
 
 所有配置编写到`~/.jarvis/env`文件中即可生效。
 
{jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/RECORD
CHANGED

@@ -1,13 +1,12 @@
-jarvis/__init__.py,sha256=
-jarvis/jarvis_agent/__init__.py,sha256=
+jarvis/__init__.py,sha256=t4h_MblKCYi1ODQZesEYCj6v7ydz3wTPEIcWq22dQ3U,74
+jarvis/jarvis_agent/__init__.py,sha256=ImVbBD4C5iq0z0DbtzIdBT2gzN7UcnWs9YeSgjoQxNE,29064
 jarvis/jarvis_agent/builtin_input_handler.py,sha256=KhvlV_QdB3P-M0TCkWvdxidNie1jU7KoMOqTIXCpwwA,1529
-jarvis/jarvis_agent/
-jarvis/jarvis_agent/jarvis.py,sha256=IuM-A9KZd1jqOFszB2l-Ua1FE5uVGvvNia1SSgp7Jgg,5963
+jarvis/jarvis_agent/jarvis.py,sha256=r1fcQtPCWOMKVC9SdzUdjutaz5C3STuL7C9CqDXDLwE,5920
 jarvis/jarvis_agent/main.py,sha256=IAD59fEMWWSSAtHJhOQMPs_NMoKtcYjrxlTvhCHEVII,2626
 jarvis/jarvis_agent/output_handler.py,sha256=7qori-RGrQmdiFepoEe3oPPKJIvRt90l_JDmvCoa4zA,1219
 jarvis/jarvis_agent/shell_input_handler.py,sha256=pi3AtPKrkKc6K9e99S1djKXQ_XrxtP6FrSWebQmRT6E,1261
 jarvis/jarvis_code_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jarvis/jarvis_code_agent/code_agent.py,sha256=
+jarvis/jarvis_code_agent/code_agent.py,sha256=FZS8kR1ZU_k1La3A7eQctRH84LRwqpbwN_Q4jpMFByY,16994
 jarvis/jarvis_code_analysis/code_review.py,sha256=68GCjxdg-n2hoMoCSGWpH_ZdQArc4G09E9Wwhypk4eY,30135
 jarvis/jarvis_code_analysis/checklists/__init__.py,sha256=cKQ_FOGy5TQgM-YkRCqORo-mUOZaPAJ9VDmZoFX58us,78
 jarvis/jarvis_code_analysis/checklists/c_cpp.py,sha256=SXPpYCNeCtU1PpKdKPiYDuOybfY9vaL0ejDn4imxDwA,1317

@@ -49,18 +48,18 @@ jarvis/jarvis_methodology/main.py,sha256=QJUMIb9o8JO-l207X5UIbazZKJYKG3F4iuUKtkm
 jarvis/jarvis_multi_agent/__init__.py,sha256=Xab5sFltJmX_9MoXqanmZs6FqKfUb2v_pG29Vk8ZXaw,4311
 jarvis/jarvis_multi_agent/main.py,sha256=Z6N5VMjzaernnRjPkqgYRv09cIhWIFQ6a__AqHA8xrQ,1567
 jarvis/jarvis_platform/__init__.py,sha256=0YnsUoM4JkIBOtImFdjfuDbrqQZT3dEaAwSJ62DrpCc,104
-jarvis/jarvis_platform/base.py,sha256=
-jarvis/jarvis_platform/human.py,sha256=
-jarvis/jarvis_platform/kimi.py,sha256=
-jarvis/jarvis_platform/openai.py,sha256=
+jarvis/jarvis_platform/base.py,sha256=egcKQ8sUjANArAR48cbddmvsX7s_LexyVefztUAGBKY,6560
+jarvis/jarvis_platform/human.py,sha256=1Jh9xigQaU78WVvsIMaVH0i6QRhaSA1oaErv9BdntF8,2565
+jarvis/jarvis_platform/kimi.py,sha256=p18Ydb_0rgnK3-WZXKUtTBQTh7Da33O277PKWOMqBhA,13106
+jarvis/jarvis_platform/openai.py,sha256=ln-OYxZkWaSY9oDoBPMmMh2TDGN40iTtUURLk8batvQ,4159
 jarvis/jarvis_platform/registry.py,sha256=UjCdPT9WIRxU-F0uuPpKmKRRCcNNxjr-bRTEPgRSNx4,7740
-jarvis/jarvis_platform/yuanbao.py,sha256=
+jarvis/jarvis_platform/yuanbao.py,sha256=0ZXq1seY-gmDLMS2Vq8cu7wLerfoWePScRZIahmkU4s,21668
 jarvis/jarvis_platform_manager/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jarvis/jarvis_platform_manager/main.py,sha256=VD0hKhLb8SevdFSYDXLRj3xwjBR1DTcRVgyn_Vp55VE,25593
 jarvis/jarvis_smart_shell/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jarvis/jarvis_smart_shell/main.py,sha256=uq5NCdNAdcHqvtG0zpajz85SigSKH1SSAcEpyHa_BOc,5180
 jarvis/jarvis_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jarvis/jarvis_tools/ask_codebase.py,sha256=
+jarvis/jarvis_tools/ask_codebase.py,sha256=fD3E4oY1o3-rVuzdgZKMmK4p1GtaW8PyCFyCDJKsM4U,9976
 jarvis/jarvis_tools/ask_user.py,sha256=cWSLG33b79IbIZEWsSNV5RHvGX6eo3nTM8TUhOMnGh8,2167
 jarvis/jarvis_tools/base.py,sha256=SR4dmrgYj3lNmtVDhHtItPvptTqCfw5SGRhgPT3I6ss,1189
 jarvis/jarvis_tools/chdir.py,sha256=wYVBqWF5kaUkKqH3cUAOKUsACzYsFtCCJJyd8UJsp4o,2706

@@ -85,19 +84,19 @@ jarvis/jarvis_tools/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG
 jarvis/jarvis_tools/cli/main.py,sha256=lk1MKpL_--JQornbGZGREgjLIhSsB3OJU-GFUfo64IE,5312
 jarvis/jarvis_utils/__init__.py,sha256=l-fsyQ-KzyqAhrJYur8eZAqsgaifGzSm24R2qtRGJ0g,849
 jarvis/jarvis_utils/builtin_replace_map.py,sha256=A-cJ8deht2vDl2iKRhoZ7qECyJ6sboVH5Zx-L9vIBUs,4314
-jarvis/jarvis_utils/config.py,sha256=
-jarvis/jarvis_utils/embedding.py,sha256=
+jarvis/jarvis_utils/config.py,sha256=EdTMfjGHn79CSz-JNZW86LVC31b5di-ig3lH06T50DU,4772
+jarvis/jarvis_utils/embedding.py,sha256=s7ze8-talEED9VXZm1QK5tPdfyj6sXJLP031tDkXeI4,3831
 jarvis/jarvis_utils/file_processors.py,sha256=tSZSMJ4qCJ_lXI0dyLgJ0j5qEh6CDXDSVI7vQiFmcuQ,2976
 jarvis/jarvis_utils/git_utils.py,sha256=MxhUcQ_gFUFyBxBiorEJ1wUk9a2TerFdq3-Z11FB-AE,11324
 jarvis/jarvis_utils/globals.py,sha256=Zs0chxA_giYiolYvawFFpcnTWgCUnn6GEusAh42jbz8,2275
 jarvis/jarvis_utils/input.py,sha256=qGf2q-yWhgT-OX-j_WYi7aZ11jYmuFNiMz2_W1nUOiM,7432
 jarvis/jarvis_utils/methodology.py,sha256=9dmtj6Ei2CRUdQP9TA_xToqZPYcm5_DQovwnRkEShsA,8626
-jarvis/jarvis_utils/output.py,sha256=
+jarvis/jarvis_utils/output.py,sha256=ervq4aQ4t2q3DskoWqATKYAfKhLtskyQwPhGtwFp_ck,8340
 jarvis/jarvis_utils/tag.py,sha256=YJHmuedLb7_AiqvKQetHr4R1FxyzIh7HN0RRkWMmYbU,429
 jarvis/jarvis_utils/utils.py,sha256=iIcefLa1kvALkmQ19BLWDplmxrcOjtxWAgBIxrs8ytg,4439
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
-jarvis_ai_assistant-0.1.
+jarvis_ai_assistant-0.1.176.dist-info/licenses/LICENSE,sha256=AGgVgQmTqFvaztRtCAXsAMryUymB18gZif7_l2e1XOg,1063
+jarvis_ai_assistant-0.1.176.dist-info/METADATA,sha256=ku5RGDDI7kKDPucm9onv6KiUqx_VDzTHP0BV7xt74uY,14602
+jarvis_ai_assistant-0.1.176.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+jarvis_ai_assistant-0.1.176.dist-info/entry_points.txt,sha256=rjj61tZ7ahLi1R-JkJmX-IzIPPHD8mnwDZap1CnMe2s,973
+jarvis_ai_assistant-0.1.176.dist-info/top_level.txt,sha256=1BOxyWfzOP_ZXj8rVTDnNCJ92bBGB0rwq8N1PCpoMIs,7
+jarvis_ai_assistant-0.1.176.dist-info/RECORD,,
jarvis/jarvis_agent/file_input_handler.py

@@ -1,108 +0,0 @@
-# -*- coding: utf-8 -*-
-
-
-import os
-import re
-from typing import Any, Tuple
-
-from yaspin import yaspin
-
-from jarvis.jarvis_tools.file_operation import FileOperationTool
-from jarvis.jarvis_utils.output import OutputType, PrettyOutput
-from jarvis.jarvis_utils.utils import is_context_overflow
-
-
-def file_input_handler(user_input: str, agent: Any) -> Tuple[str, bool]:
-"""Process user input containing file references and read file contents.
-
-Args:
-user_input: Input string that may contain file references in format:
-- 'file_path' (whole file)
-- 'file_path:start_line,end_line' (line range)
-- 'file_path:start_line:end_line' (alternative range format)
-agent: Agent object for further processing (currently unused)
-
-Returns:
-Tuple[str, bool]:
-- Processed prompt string with file contents prepended
-- Boolean indicating if context overflow occurred
-"""
-prompt = user_input
-files = []
-
-file_refs = re.findall(r"'([^']+)'", user_input)
-for ref in file_refs:
-# Handle file:start,end or file:start:end format
-if ':' in ref:
-file_path, line_range = ref.split(':', 1)
-# Initialize with default values
-start_line = 1 # 1-based
-end_line = -1
-
-# Process line range if specified
-if ',' in line_range or ':' in line_range:
-try:
-raw_start, raw_end = map(int, re.split(r'[,:]', line_range))
-
-# Handle special values and Python-style negative indices
-try:
-with open(file_path, 'r', encoding='utf-8', errors="ignore") as f:
-total_lines = len(f.readlines())
-except FileNotFoundError:
-PrettyOutput.print(f"文件不存在: {file_path}", OutputType.WARNING)
-continue
-# Process start line (0 means whole file, negative means from end)
-if raw_start == 0: # 0表示整个文件
-start_line = 1
-end_line = total_lines
-else:
-start_line = raw_start if raw_start > 0 else total_lines + raw_start + 1
-
-# Process end line
-if raw_end == 0: # 0表示整个文件(如果start也是0)
-end_line = total_lines
-else:
-end_line = raw_end if raw_end > 0 else total_lines + raw_end + 1
-
-# Auto-correct ranges
-start_line = max(1, min(start_line, total_lines))
-end_line = max(start_line, min(end_line, total_lines))
-
-# Final validation
-if start_line < 1 or end_line > total_lines or start_line > end_line:
-raise ValueError
-
-except:
-continue
-
-# Add file if it exists
-if os.path.isfile(file_path):
-files.append({
-"path": file_path,
-"start_line": start_line,
-"end_line": end_line
-})
-else:
-# Handle simple file path
-if os.path.isfile(ref):
-files.append({
-"path": ref,
-"start_line": 1, # 1-based
-"end_line": -1
-})
-
-# Read and process files if any were found
-if files:
-with yaspin(text="正在读取文件...", color="cyan") as spinner:
-old_prompt = prompt
-result = FileOperationTool().execute({"operation":"read","files": files})
-if result["success"]:
-spinner.text = "文件读取完成"
-spinner.ok("✅")
-# Prepend file contents to prompt and check for overflow
-prompt = result["stdout"] + "\n" + prompt
-if is_context_overflow(prompt):
-return old_prompt, False
-
-return prompt, False
-
{jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/WHEEL
RENAMED
File without changes

{jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/entry_points.txt
RENAMED
File without changes

{jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/licenses/LICENSE
RENAMED
File without changes

{jarvis_ai_assistant-0.1.175.dist-info → jarvis_ai_assistant-0.1.176.dist-info}/top_level.txt
RENAMED
File without changes