jarvis-ai-assistant 0.3.30__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +289 -87
- jarvis/jarvis_agent/agent_manager.py +17 -8
- jarvis/jarvis_agent/edit_file_handler.py +374 -86
- jarvis/jarvis_agent/event_bus.py +1 -1
- jarvis/jarvis_agent/file_context_handler.py +79 -0
- jarvis/jarvis_agent/jarvis.py +601 -43
- jarvis/jarvis_agent/main.py +32 -2
- jarvis/jarvis_agent/rewrite_file_handler.py +141 -0
- jarvis/jarvis_agent/run_loop.py +38 -5
- jarvis/jarvis_agent/share_manager.py +8 -1
- jarvis/jarvis_agent/stdio_redirect.py +295 -0
- jarvis/jarvis_agent/task_analyzer.py +5 -2
- jarvis/jarvis_agent/task_planner.py +496 -0
- jarvis/jarvis_agent/utils.py +5 -1
- jarvis/jarvis_agent/web_bridge.py +189 -0
- jarvis/jarvis_agent/web_output_sink.py +53 -0
- jarvis/jarvis_agent/web_server.py +751 -0
- jarvis/jarvis_c2rust/__init__.py +26 -0
- jarvis/jarvis_c2rust/cli.py +613 -0
- jarvis/jarvis_c2rust/collector.py +258 -0
- jarvis/jarvis_c2rust/library_replacer.py +1122 -0
- jarvis/jarvis_c2rust/llm_module_agent.py +1300 -0
- jarvis/jarvis_c2rust/optimizer.py +960 -0
- jarvis/jarvis_c2rust/scanner.py +1681 -0
- jarvis/jarvis_c2rust/transpiler.py +2325 -0
- jarvis/jarvis_code_agent/build_validation_config.py +133 -0
- jarvis/jarvis_code_agent/code_agent.py +1171 -94
- jarvis/jarvis_code_agent/code_analyzer/__init__.py +62 -0
- jarvis/jarvis_code_agent/code_analyzer/base_language.py +74 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/__init__.py +44 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/base.py +102 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/cmake.py +59 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/detector.py +125 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/fallback.py +69 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/go.py +38 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/java_gradle.py +44 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/java_maven.py +38 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/makefile.py +50 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/nodejs.py +93 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/python.py +129 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/rust.py +54 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/validator.py +154 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator.py +43 -0
- jarvis/jarvis_code_agent/code_analyzer/context_manager.py +363 -0
- jarvis/jarvis_code_agent/code_analyzer/context_recommender.py +18 -0
- jarvis/jarvis_code_agent/code_analyzer/dependency_analyzer.py +132 -0
- jarvis/jarvis_code_agent/code_analyzer/file_ignore.py +330 -0
- jarvis/jarvis_code_agent/code_analyzer/impact_analyzer.py +781 -0
- jarvis/jarvis_code_agent/code_analyzer/language_registry.py +185 -0
- jarvis/jarvis_code_agent/code_analyzer/language_support.py +89 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/__init__.py +31 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/c_cpp_language.py +231 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/go_language.py +183 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/python_language.py +219 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/rust_language.py +209 -0
- jarvis/jarvis_code_agent/code_analyzer/llm_context_recommender.py +451 -0
- jarvis/jarvis_code_agent/code_analyzer/symbol_extractor.py +77 -0
- jarvis/jarvis_code_agent/code_analyzer/tree_sitter_extractor.py +48 -0
- jarvis/jarvis_code_agent/lint.py +270 -8
- jarvis/jarvis_code_agent/utils.py +142 -0
- jarvis/jarvis_code_analysis/code_review.py +483 -569
- jarvis/jarvis_data/config_schema.json +97 -8
- jarvis/jarvis_git_utils/git_commiter.py +38 -26
- jarvis/jarvis_mcp/sse_mcp_client.py +2 -2
- jarvis/jarvis_mcp/stdio_mcp_client.py +1 -1
- jarvis/jarvis_memory_organizer/memory_organizer.py +1 -1
- jarvis/jarvis_multi_agent/__init__.py +239 -25
- jarvis/jarvis_multi_agent/main.py +37 -1
- jarvis/jarvis_platform/base.py +103 -51
- jarvis/jarvis_platform/openai.py +26 -1
- jarvis/jarvis_platform/yuanbao.py +1 -1
- jarvis/jarvis_platform_manager/service.py +2 -2
- jarvis/jarvis_rag/cli.py +4 -4
- jarvis/jarvis_sec/__init__.py +3605 -0
- jarvis/jarvis_sec/checkers/__init__.py +32 -0
- jarvis/jarvis_sec/checkers/c_checker.py +2680 -0
- jarvis/jarvis_sec/checkers/rust_checker.py +1108 -0
- jarvis/jarvis_sec/cli.py +116 -0
- jarvis/jarvis_sec/report.py +257 -0
- jarvis/jarvis_sec/status.py +264 -0
- jarvis/jarvis_sec/types.py +20 -0
- jarvis/jarvis_sec/workflow.py +219 -0
- jarvis/jarvis_stats/cli.py +1 -1
- jarvis/jarvis_stats/stats.py +1 -1
- jarvis/jarvis_stats/visualizer.py +1 -1
- jarvis/jarvis_tools/cli/main.py +1 -0
- jarvis/jarvis_tools/execute_script.py +46 -9
- jarvis/jarvis_tools/generate_new_tool.py +3 -1
- jarvis/jarvis_tools/read_code.py +275 -12
- jarvis/jarvis_tools/read_symbols.py +141 -0
- jarvis/jarvis_tools/read_webpage.py +5 -3
- jarvis/jarvis_tools/registry.py +73 -35
- jarvis/jarvis_tools/search_web.py +15 -11
- jarvis/jarvis_tools/sub_agent.py +24 -42
- jarvis/jarvis_tools/sub_code_agent.py +14 -13
- jarvis/jarvis_tools/virtual_tty.py +1 -1
- jarvis/jarvis_utils/config.py +187 -35
- jarvis/jarvis_utils/embedding.py +3 -0
- jarvis/jarvis_utils/git_utils.py +181 -6
- jarvis/jarvis_utils/globals.py +3 -3
- jarvis/jarvis_utils/http.py +1 -1
- jarvis/jarvis_utils/input.py +78 -2
- jarvis/jarvis_utils/methodology.py +25 -19
- jarvis/jarvis_utils/utils.py +644 -359
- {jarvis_ai_assistant-0.3.30.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/METADATA +85 -1
- jarvis_ai_assistant-0.7.0.dist-info/RECORD +192 -0
- {jarvis_ai_assistant-0.3.30.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/entry_points.txt +4 -0
- jarvis/jarvis_agent/config.py +0 -92
- jarvis/jarvis_tools/edit_file.py +0 -179
- jarvis/jarvis_tools/rewrite_file.py +0 -191
- jarvis_ai_assistant-0.3.30.dist-info/RECORD +0 -137
- {jarvis_ai_assistant-0.3.30.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.3.30.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/licenses/LICENSE +0 -0
- {jarvis_ai_assistant-0.3.30.dist-info → jarvis_ai_assistant-0.7.0.dist-info}/top_level.txt +0 -0

jarvis/jarvis_multi_agent/main.py
CHANGED

@@ -3,10 +3,12 @@ from typing import Optional
 
 import typer
 import yaml  # type: ignore[import-untyped]
+import os
 
 from jarvis.jarvis_multi_agent import MultiAgent
 from jarvis.jarvis_utils.input import get_multiline_input
 from jarvis.jarvis_utils.utils import init_env
+from jarvis.jarvis_utils.config import set_config
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput
 
 app = typer.Typer(help="多智能体系统启动器")
@@ -18,9 +20,39 @@ def cli(
     user_input: Optional[str] = typer.Option(
         None, "--input", "-i", help="用户输入(可选)"
     ),
+    model_group: Optional[str] = typer.Option(
+        None, "-g", "--llm-group", help="使用的模型组,覆盖配置文件中的设置"
+    ),
+    non_interactive: bool = typer.Option(
+        False, "-n", "--non-interactive", help="启用非交互模式:用户无法与命令交互,脚本执行超时限制为5分钟"
+    ),
 ):
     """从YAML配置文件初始化并运行多智能体系统"""
+    # CLI 标志:非交互模式(不依赖配置文件)
+    if non_interactive:
+        try:
+            os.environ["JARVIS_NON_INTERACTIVE"] = "true"
+        except Exception:
+            pass
+        # 注意:全局配置同步在 init_env 之后执行,避免被覆盖
+    # 非交互模式要求从命令行传入任务
+    if non_interactive and not (user_input and str(user_input).strip()):
+        PrettyOutput.print(
+            "非交互模式已启用:必须使用 --input 传入任务内容,因多行输入不可用。",
+            OutputType.ERROR,
+        )
+        raise typer.Exit(code=2)
     init_env("欢迎使用 Jarvis-MultiAgent,您的多智能体系统已准备就绪!")
+
+    # 在初始化环境后同步 CLI 选项到全局配置,避免被 init_env 覆盖
+    try:
+        if non_interactive:
+            set_config("JARVIS_NON_INTERACTIVE", True)
+        if model_group:
+            set_config("JARVIS_LLM_GROUP", str(model_group))
+    except Exception:
+        # 静默忽略同步异常,不影响主流程
+        pass
 
     try:
         with open(config, "r", errors="ignore") as f:
@@ -34,7 +66,11 @@ def cli(
             raise ValueError("必须指定main_agent作为主智能体")
 
         # 创建并运行多智能体系统
-        multi_agent = MultiAgent(
+        multi_agent = MultiAgent(
+            agents_config,
+            main_agent_name,
+            common_system_prompt=str(config_data.get("common_system_prompt", "") or "")
+        )
         final_input = (
             user_input
             if user_input is not None
jarvis/jarvis_platform/base.py
CHANGED
@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 import re
+import os
+from datetime import datetime
 from abc import ABC, abstractmethod
 from types import TracebackType
 from typing import Dict, Generator, List, Optional, Tuple, Type
@@ -17,9 +19,12 @@ from jarvis.jarvis_utils.config import (
     get_pretty_output,
     is_print_prompt,
     is_immediate_abort,
+    is_save_session_history,
+    get_data_dir,
 )
 from jarvis.jarvis_utils.embedding import split_text_into_chunks
 from jarvis.jarvis_utils.globals import set_in_chat, get_interrupt, console
+import jarvis.jarvis_utils.globals as G
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput
 from jarvis.jarvis_utils.tag import ct, ot
 from jarvis.jarvis_utils.utils import get_context_token_count, while_success, while_true
@@ -34,6 +39,7 @@ class BasePlatform(ABC):
         self.web = False  # 添加web属性,默认false
         self._saved = False
         self.model_group: Optional[str] = None
+        self._session_history_file: Optional[str] = None
 
     def __enter__(self) -> Self:
         """Enter context manager"""
@@ -57,6 +63,7 @@ class BasePlatform(ABC):
     def reset(self):
         """Reset model"""
         self.delete_chat()
+        self._session_history_file = None
 
     @abstractmethod
     def chat(self, message: str) -> Generator[str, None, None]:
@@ -77,6 +84,11 @@ class BasePlatform(ABC):
 
         start_time = time.time()
 
+        # 当输入为空白字符串时,打印警告并直接返回空字符串
+        if message.strip() == "":
+            PrettyOutput.print("输入为空白字符串,已忽略本次请求", OutputType.WARNING)
+            return ""
+
         input_token_count = get_context_token_count(message)
 
         if input_token_count > get_max_input_token_count(self.model_group):
@@ -127,7 +139,9 @@ class BasePlatform(ABC):
             first_chunk = None
 
             with Status(
-                f"🤔 {self.name()} 正在思考中...",
+                f"🤔 {(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()} 正在思考中...",
+                spinner="dots",
+                console=console,
             ):
                 try:
                     while True:
@@ -135,79 +149,66 @@ class BasePlatform(ABC):
                         if first_chunk:
                             break
                 except StopIteration:
+                    self._append_session_history(message, "")
                     return ""
 
             text_content = Text(overflow="fold")
             panel = Panel(
                 text_content,
-                title=f"[bold cyan]{self.name()}[/bold cyan]",
+                title=f"[bold cyan]{(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()}[/bold cyan]",
                 subtitle="[yellow]正在回答... (按 Ctrl+C 中断)[/yellow]",
                 border_style="bright_blue",
                 box=box.ROUNDED,
                 expand=True,  # 允许面板自动调整大小
             )
 
-            buffer = []
-            buffer_count = 0
             with Live(panel, refresh_per_second=4, transient=False) as live:
+
+                def _update_panel_content(content: str):
+                    text_content.append(content, style="bright_white")
+                    # --- Scrolling Logic ---
+                    # Calculate available height in the panel
+                    max_text_height = (
+                        console.height - 5
+                    )  # Leave space for borders/titles
+                    if max_text_height <= 0:
+                        max_text_height = 1
+
+                    # Get the actual number of lines the text will wrap to
+                    lines = text_content.wrap(
+                        console,
+                        console.width - 4 if console.width > 4 else 1,
+                    )
+
+                    # If content overflows, truncate to show only the last few lines
+                    if len(lines) > max_text_height:
+                        # Rebuild the text from the wrapped lines to ensure visual consistency
+                        # This correctly handles both wrapped long lines and explicit newlines
+                        text_content.plain = "\n".join(
+                            [line.plain for line in lines[-max_text_height:]]
+                        )
+
+                    panel.subtitle = (
+                        "[yellow]正在回答... (按 Ctrl+C 中断)[/yellow]"
+                    )
+                    live.update(panel)
+
                 # Process first chunk
                 response += first_chunk
-
-
+                if first_chunk:
+                    _update_panel_content(first_chunk)
 
                 # Process rest of the chunks
                 for s in chat_iterator:
                     if not s:
                         continue
                     response += s  # Accumulate the full response string
-
-                    buffer_count += 1
-
-                    # 积累一定量或达到最后再更新,减少闪烁
-                    if buffer_count >= 5 or s == "":
-                        # Append buffered content to the Text object
-                        text_content.append(
-                            "".join(buffer), style="bright_white"
-                        )
-                        buffer.clear()
-                        buffer_count = 0
-
-                        # --- Scrolling Logic ---
-                        # Calculate available height in the panel
-                        max_text_height = (
-                            console.height - 5
-                        )  # Leave space for borders/titles
-                        if max_text_height <= 0:
-                            max_text_height = 1
-
-                        # Get the actual number of lines the text will wrap to
-                        lines = text_content.wrap(
-                            console,
-                            console.width - 4 if console.width > 4 else 1,
-                        )
-
-                        # If content overflows, truncate to show only the last few lines
-                        if len(lines) > max_text_height:
-                            # Rebuild the text from the wrapped lines to ensure visual consistency
-                            # This correctly handles both wrapped long lines and explicit newlines
-                            text_content.plain = "\n".join(
-                                [line.plain for line in lines[-max_text_height:]]
-                            )
-
-                        panel.subtitle = (
-                            "[yellow]正在回答... (按 Ctrl+C 中断)[/yellow]"
-                        )
-                        live.update(panel)
+                    _update_panel_content(s)
 
                     if is_immediate_abort() and get_interrupt():
+                        self._append_session_history(message, response)
                         return response  # Return the partial response immediately
 
-                # Ensure any remaining content in the buffer is displayed
-                if buffer:
-                    text_content.append(
-                        "".join(buffer), style="bright_white"
-                    )
-
                 # At the end, display the entire response
                 text_content.plain = response
 
@@ -215,16 +216,18 @@ class BasePlatform(ABC):
                 duration = end_time - start_time
                 panel.subtitle = f"[bold green]✓ 对话完成耗时: {duration:.2f}秒[/bold green]"
                 live.update(panel)
+            console.print()
         else:
             # Print a clear prefix line before streaming model output (non-pretty mode)
             console.print(
-                f"🤖 模型输出 - {self.name()} (按 Ctrl+C 中断)",
+                f"🤖 模型输出 - {(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()} (按 Ctrl+C 中断)",
                 soft_wrap=False,
             )
             for s in self.chat(message):
                 console.print(s, end="")
                 response += s
                 if is_immediate_abort() and get_interrupt():
+                    self._append_session_history(message, response)
                     return response
             console.print()
             end_time = time.time()
@@ -234,6 +237,7 @@ class BasePlatform(ABC):
             for s in self.chat(message):
                 response += s
                 if is_immediate_abort() and get_interrupt():
+                    self._append_session_history(message, response)
                     return response
         # Keep original think tag handling
         response = re.sub(
@@ -242,6 +246,8 @@ class BasePlatform(ABC):
         response = re.sub(
             ot("thinking") + r".*?" + ct("thinking"), "", response, flags=re.DOTALL
         )
+        # Save session history (input and full response)
+        self._append_session_history(message, response)
         return response
 
     def chat_until_success(self, message: str) -> str:
@@ -346,6 +352,52 @@ class BasePlatform(ABC):
         """Set web flag"""
         self.web = web
 
+    def _append_session_history(self, user_input: str, model_output: str) -> None:
+        """
+        Append the user input and model output to a session history file if enabled.
+        The file name is generated on first save and reused until reset.
+        """
+        try:
+            if not is_save_session_history():
+                return
+
+            if self._session_history_file is None:
+                # Ensure session history directory exists under data directory
+                data_dir = get_data_dir()
+                session_dir = os.path.join(data_dir, "session_history")
+                os.makedirs(session_dir, exist_ok=True)
+
+                # Build a safe filename including platform, model and timestamp
+                try:
+                    platform_name = type(self).platform_name()
+                except Exception:
+                    platform_name = "unknown_platform"
+
+                try:
+                    model_name = self.name()
+                except Exception:
+                    model_name = "unknown_model"
+
+                safe_platform = re.sub(r"[^\w\-\.]+", "_", str(platform_name))
+                safe_model = re.sub(r"[^\w\-\.]+", "_", str(model_name))
+                ts = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+                self._session_history_file = os.path.join(
+                    session_dir, f"session_history_{safe_platform}_{safe_model}_{ts}.log"
+                )
+
+            # Append record
+            with open(self._session_history_file, "a", encoding="utf-8", errors="ignore") as f:
+                ts_line = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                f.write(f"===== {ts_line} =====\n")
+                f.write("USER:\n")
+                f.write(f"{user_input}\n")
+                f.write("\nASSISTANT:\n")
+                f.write(f"{model_output}\n\n")
+        except Exception:
+            # Do not break chat flow if writing history fails
+            pass
+
     @abstractmethod
     def support_web(self) -> bool:
         """Check if platform supports web functionality"""
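
When is_save_session_history() is enabled, every chat call appends a plain-text record to <data_dir>/session_history/session_history_<platform>_<model>_<timestamp>.log. Based on the f.write calls above, a single record looks like this (timestamp and content are illustrative):

===== 2025-01-01 12:00:00 =====
USER:
<user input>

ASSISTANT:
<model output>
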
jarvis/jarvis_platform/openai.py
CHANGED
@@ -24,7 +24,32 @@ class OpenAIModel(BasePlatform):
         self.base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
         self.model_name = os.getenv("JARVIS_MODEL") or "gpt-4o"
 
-
+        # Optional: Inject extra HTTP headers via environment variable
+        # Expected format: OPENAI_EXTRA_HEADERS='{"Header-Name": "value", "X-Trace": "abc"}'
+        headers_str = os.getenv("OPENAI_EXTRA_HEADERS")
+        self.extra_headers: Dict[str, str] = {}
+        if headers_str:
+            try:
+                parsed = json.loads(headers_str)
+                if isinstance(parsed, dict):
+                    # Ensure all header keys/values are strings
+                    self.extra_headers = {str(k): str(v) for k, v in parsed.items()}
+                else:
+                    PrettyOutput.print("OPENAI_EXTRA_HEADERS 应为 JSON 对象,如 {'X-Source':'jarvis'}", OutputType.WARNING)
+            except Exception as e:
+                PrettyOutput.print(f"解析 OPENAI_EXTRA_HEADERS 失败: {e}", OutputType.WARNING)
+
+        # Initialize OpenAI client, try to pass default headers if SDK supports it
+        try:
+            if self.extra_headers:
+                self.client = OpenAI(api_key=self.api_key, base_url=self.base_url, default_headers=self.extra_headers)
+            else:
+                self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+        except TypeError:
+            # Fallback: SDK version may not support default_headers
+            self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+            if self.extra_headers:
+                PrettyOutput.print("当前 OpenAI SDK 不支持 default_headers,未能注入额外 HTTP 头", OutputType.WARNING)
         self.messages: List[Dict[str, str]] = []
         self.system_message = ""
 
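
The extra-headers hook is driven entirely by the OPENAI_EXTRA_HEADERS environment variable, which must contain a JSON object. A minimal sketch of setting it before the platform is constructed (the header names and values are illustrative):

import json
import os

# Must serialize to a JSON object; anything else only triggers the warning shown above.
os.environ["OPENAI_EXTRA_HEADERS"] = json.dumps({"X-Source": "jarvis", "X-Trace": "abc"})
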
jarvis/jarvis_platform/yuanbao.py
CHANGED

@@ -77,7 +77,7 @@ class YuanbaoPlatform(BasePlatform):
             "Referer": f"https://yuanbao.tencent.com/chat/{self.agent_id}",
             "X-Source": "web",
             "Accept-Encoding": "gzip, deflate, br, zstd",
-
+
             "Sec-Fetch-Site": "same-origin",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Dest": "empty",
jarvis/jarvis_platform_manager/service.py
CHANGED

@@ -100,7 +100,7 @@ def start_service(
     PrettyOutput.print(
         f"Starting Jarvis API server on {host}:{port}", OutputType.SUCCESS
     )
-    PrettyOutput.print("
+    PrettyOutput.print("本服务提供与 OpenAI 兼容的 API", OutputType.INFO)
 
     if default_platform and default_model:
         PrettyOutput.print(
@@ -151,7 +151,7 @@ def start_service(
             if response:
                 f.write(f"\nResponse:\n{response}\n")
 
-            PrettyOutput.print(f"
+            PrettyOutput.print(f"会话已记录到 {log_file}", OutputType.INFO)
 
     @app.get("/v1/models")
     async def list_models() -> Dict[str, Any]:
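
The restored messages describe an OpenAI-compatible API, and the @app.get("/v1/models") route above can be used to verify a running instance. A minimal sketch, assuming the service was started on 127.0.0.1:8000 (host and port depend on how start_service was invoked):

import requests

# Lists the models exposed by the OpenAI-compatible endpoint.
resp = requests.get("http://127.0.0.1:8000/v1/models")
print(resp.json())
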
jarvis/jarvis_rag/cli.py
CHANGED
@@ -54,10 +54,10 @@ _project_root = os.path.abspath(
 if _project_root not in sys.path:
     sys.path.insert(0, _project_root)
 
-from jarvis.jarvis_platform.base import BasePlatform
-from jarvis.jarvis_platform.registry import PlatformRegistry
-from jarvis.jarvis_rag.llm_interface import LLMInterface
-from jarvis.jarvis_rag.rag_pipeline import JarvisRAGPipeline
+from jarvis.jarvis_platform.base import BasePlatform  # noqa: E402
+from jarvis.jarvis_platform.registry import PlatformRegistry  # noqa: E402
+from jarvis.jarvis_rag.llm_interface import LLMInterface  # noqa: E402
+from jarvis.jarvis_rag.rag_pipeline import JarvisRAGPipeline  # noqa: E402
 
 app = typer.Typer(
     name="jarvis-rag",
|