jarvis-ai-assistant 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- jarvis/__init__.py +1 -1
- jarvis/__pycache__/__init__.cpython-313.pyc +0 -0
- jarvis/__pycache__/agent.cpython-313.pyc +0 -0
- jarvis/__pycache__/main.cpython-313.pyc +0 -0
- jarvis/__pycache__/zte_llm.cpython-313.pyc +0 -0
- jarvis/agent.py +93 -150
- jarvis/main.py +14 -89
- jarvis/models/__init__.py +11 -0
- jarvis/models/__pycache__/__init__.cpython-313.pyc +0 -0
- jarvis/models/__pycache__/base.cpython-313.pyc +0 -0
- jarvis/models/__pycache__/kimi.cpython-313.pyc +0 -0
- jarvis/models/base.py +19 -0
- jarvis/models/kimi.py +258 -0
- jarvis/tools/__init__.py +0 -4
- jarvis/tools/__pycache__/__init__.cpython-313.pyc +0 -0
- jarvis/tools/__pycache__/base.cpython-313.pyc +0 -0
- jarvis/tools/__pycache__/sub_agent.cpython-313.pyc +0 -0
- jarvis/tools/base.py +0 -6
- {jarvis_ai_assistant-0.1.9.dist-info → jarvis_ai_assistant-0.1.11.dist-info}/METADATA +1 -6
- jarvis_ai_assistant-0.1.11.dist-info/RECORD +39 -0
- jarvis/models.py +0 -122
- jarvis/tools/bing_search.py +0 -38
- jarvis/tools/search.py +0 -132
- jarvis/tools/sub_agent.py +0 -83
- jarvis/tools/webpage.py +0 -76
- jarvis/zte_llm.py +0 -135
- jarvis_ai_assistant-0.1.9.dist-info/RECORD +0 -39
- {jarvis_ai_assistant-0.1.9.dist-info → jarvis_ai_assistant-0.1.11.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.9.dist-info → jarvis_ai_assistant-0.1.11.dist-info}/entry_points.txt +0 -0
- {jarvis_ai_assistant-0.1.9.dist-info → jarvis_ai_assistant-0.1.11.dist-info}/top_level.txt +0 -0
jarvis/__init__.py
CHANGED
(+1 -1; diff content not rendered in this view)

jarvis/__pycache__/__init__.cpython-313.pyc
jarvis/__pycache__/agent.cpython-313.pyc
jarvis/__pycache__/main.cpython-313.pyc
jarvis/__pycache__/zte_llm.cpython-313.pyc
Binary files (not shown)
jarvis/agent.py
CHANGED
@@ -11,7 +11,7 @@ import os
 from datetime import datetime

 class Agent:
-    def __init__(self, model: BaseModel, tool_registry: ToolRegistry, name: str = "Jarvis"
+    def __init__(self, model: BaseModel, tool_registry: ToolRegistry, name: str = "Jarvis"):
         """Initialize Agent with a model, optional tool registry and name

         Args:
@@ -23,29 +23,88 @@ class Agent:
         self.model = model
         self.tool_registry = tool_registry or ToolRegistry(model)
         self.name = name
-        self.
+        self.prompt = ""
+
+
+    @staticmethod
+    def extract_tool_calls(content: str) -> List[Dict]:
+        """Extract tool calls from the content; raise if more than one tool call is detected, and return the content before the tool call together with the tool call"""
+        # Split the content into lines
+        lines = content.split('\n')
+        tool_call_lines = []
+        in_tool_call = False
+
+        # Process line by line
+        for line in lines:
+            if '<START_TOOL_CALL>' in line:
+                in_tool_call = True
+                continue
+            elif '<END_TOOL_CALL>' in line:
+                if in_tool_call and tool_call_lines:
+                    try:
+                        # Parse the YAML directly
+                        tool_call_text = '\n'.join(tool_call_lines)
+                        tool_call_data = yaml.safe_load(tool_call_text)
+
+                        # Validate the required fields
+                        if "name" in tool_call_data and "arguments" in tool_call_data:
+                            # Return the content before the tool call and the tool call
+                            return [{
+                                "name": tool_call_data["name"],
+                                "arguments": tool_call_data["arguments"]
+                            }]
+                        else:
+                            PrettyOutput.print("Tool call is missing required fields", OutputType.ERROR)
+                            raise 'Tool call is missing required fields'
+                    except yaml.YAMLError as e:
+                        PrettyOutput.print(f"YAML parse error: {str(e)}", OutputType.ERROR)
+                        raise 'YAML parse error'
+                    except Exception as e:
+                        PrettyOutput.print(f"Error while handling the tool call: {str(e)}", OutputType.ERROR)
+                        raise 'Error while handling the tool call'
+                in_tool_call = False
+                continue
+
+            if in_tool_call:
+                tool_call_lines.append(line)
+
+        return []
+
+    def _call_model(self, message: str) -> str:
+        """Call the model and get its response"""
+        try:
+            return self.model.chat(message)
+        except Exception as e:
+            raise Exception(f"{self.name}: model call failed: {str(e)}")
+
+    def run(self, user_input: str, file_list: Optional[List[str]] = None):
+        """Process the user input and return a response, i.e. a task summary report
+
+        Args:
+            user_input: task description entered by the user
+            file_list: optional list of files, defaults to None
+
+        Returns:
+            str: task summary report
+        """
+        self.clear_history()
+
+        if file_list:
+            self.model.upload_files(file_list)
+
+        # Announce the start of the task
+        PrettyOutput.section(f"Starting new task: {self.name}", OutputType.PLANNING)

-        # Build the tool description
         tools_prompt = "Available tools:\n"
         for tool in self.tool_registry.get_all_tools():
             tools_prompt += f"- Name: {tool['name']}\n"
             tools_prompt += f"  Description: {tool['description']}\n"
             tools_prompt += f"  Parameters: {tool['parameters']}\n"

-        self.
-            {
-                "role": "system",
-                "content": f"""You are {name}, an AI assistant that strictly follows the ReAct framework to reason and act step by step.
+        self.prompt =f"""You are {self.name}, an AI assistant that strictly follows the ReAct framework to reason and act step by step.

 {tools_prompt}

-Key rules:
-‼️ Do not fabricate dialogue
-‼️ Do not assume user responses
-‼️ Do not continue without actual user input
-‼️ Only respond to what the user actually said
-‼️ Stop and wait after every action
-
 ReAct framework:
 1. Think
    - Analyze the current situation
@@ -62,7 +121,7 @@ ReAct framework:
    - Only use the tools listed below
    - Execute only one tool at a time
    - Tools are executed manually by the user
-   -
+   - A valid, well-formed YAML format must be used:
 <START_TOOL_CALL>
 name: tool_name
 arguments:
@@ -103,7 +162,6 @@ arguments:
 ‼️ Tool calls must be valid YAML
 ‼️ Arguments must be indented correctly
 ‼️ Use YAML block style (|) for multi-line values
-‼️ Content after <END_TOOL_CALL> will be discarded
 ‼️ Tools are executed manually by the user
 ‼️ Wait for the user to provide the tool execution result
 ‼️ Do not assume or imagine user responses
@@ -125,174 +183,59 @@ arguments:
 - Do not invent dialogue
 - Stop after every action
 - Only continue when there is actual user input
-"""
-        }
-        ]

-
-
-
-        # Split the content into lines
-        lines = content.split('\n')
-        tool_call_lines = []
-        content_lines = []  # Stores the content before the tool call
-        in_tool_call = False
-
-        # Process line by line
-        for line in lines:
-            content_lines.append(line)  # All content is added to content_lines
-
-            if '<START_TOOL_CALL>' in line:
-                in_tool_call = True
-                continue
-            elif '<END_TOOL_CALL>' in line:
-                if in_tool_call and tool_call_lines:
-                    try:
-                        # Parse the YAML directly
-                        tool_call_text = '\n'.join(tool_call_lines)
-                        tool_call_data = yaml.safe_load(tool_call_text)
-
-                        # Validate the required fields
-                        if "name" in tool_call_data and "arguments" in tool_call_data:
-                            # Return the content before the tool call and the tool call
-                            return '\n'.join(content_lines), [{
-                                "name": tool_call_data["name"],
-                                "arguments": tool_call_data["arguments"]
-                            }]
-                        else:
-                            PrettyOutput.print("Tool call is missing required fields", OutputType.ERROR)
-                            raise 'Tool call is missing required fields'
-                    except yaml.YAMLError as e:
-                        PrettyOutput.print(f"YAML parse error: {str(e)}", OutputType.ERROR)
-                        raise 'YAML parse error'
-                    except Exception as e:
-                        PrettyOutput.print(f"Error while handling the tool call: {str(e)}", OutputType.ERROR)
-                        raise 'Error while handling the tool call'
-                in_tool_call = False
-                continue
+Task:
+{user_input}
+"""

-            if in_tool_call:
-                tool_call_lines.append(line)
-
-        # If no valid tool call was found, return the original content
-        return '\n'.join(content_lines), []
-
-    def _call_model(self, messages: List[Dict]) -> Dict:
-        """Call the model and get its response"""
-        try:
-            return self.model.chat(
-                messages=messages,
-            )
-        except Exception as e:
-            raise Exception(f"{self.name}: model call failed: {str(e)}")
-
-    def run(self, user_input: str) -> str:
-        """Process the user input and return a response, i.e. a task summary report"""
-        self.clear_history()
-
-        # Announce the start of the task
-        PrettyOutput.section(f"Starting new task: {self.name}", OutputType.PLANNING)
-
-        self.messages.append({
-            "role": "user",
-            "content": user_input
-        })

         while True:
             try:
                 # Show the thinking status
                 PrettyOutput.print("Analyzing task...", OutputType.PROGRESS)

-                current_response = self._call_model(self.
+                current_response = self._call_model(self.prompt)

                 try:
                     result = Agent.extract_tool_calls(current_response)
                 except Exception as e:
                     PrettyOutput.print(f"Tool call error: {str(e)}", OutputType.ERROR)
-                    self.
-                        "role": "user",
-                        "content": f"Tool call error: {str(e)}"
-                    })
+                    self.prompt = f"Tool call error: {str(e)}"
                     continue
-
-
-                self.messages.append({
-                    "role": "assistant",
-                    "content": result[0]
-                })
-

-                if len(result
+                if len(result) > 0:
                     try:
                         # Show the tool call
                         PrettyOutput.print("Executing tool call...", OutputType.PROGRESS)
-                        tool_result = self.tool_registry.handle_tool_calls(result
+                        tool_result = self.tool_registry.handle_tool_calls(result)
                         PrettyOutput.print(tool_result, OutputType.RESULT)
                     except Exception as e:
                         PrettyOutput.print(str(e), OutputType.ERROR)
                         tool_result = f"Tool call failed: {str(e)}"

-                    self.
-                        "role": "user",
-                        "content": tool_result
-                    })
+                    self.prompt = tool_result
                     continue

                 # Get user input
                 user_input = get_multiline_input(f"{self.name}: You can keep typing, or enter an empty line to finish the current task")
-                if not user_input:
-                    # Only sub-agents need to generate a task summary
-                    if self.is_sub_agent:
-                        PrettyOutput.print("Generating task summary...", OutputType.PROGRESS)
-
-                        # Generate the task summary
-                        summary_prompt = {
-                            "role": "user",
-                            "content": """The task is complete. Based on the previous analysis and execution results, please provide a concise task summary, including:
-
-1. Key findings:
-   - Important findings from the analysis
-   - Key results of tool execution
-   - Important data discovered
-
-2. Execution outcomes:
-   - Task completion status
-   - Concrete implementation results
-   - Goals achieved
-
-Please describe facts and actual results directly and keep it concise."""
-                        }
-
-                        while True:
-                            try:
-                                summary = self._call_model(self.messages + [summary_prompt])
-
-                                # Show the task summary
-                                PrettyOutput.section("Task Summary", OutputType.SUCCESS)
-                                PrettyOutput.print(summary, OutputType.SYSTEM)
-                                PrettyOutput.section("Task Complete", OutputType.SUCCESS)
-
-                                return summary
-
-                            except Exception as e:
-                                PrettyOutput.print(str(e), OutputType.ERROR)
-                    else:
-                        # The top-level agent simply returns an empty string
-                        PrettyOutput.section("Task Complete", OutputType.SUCCESS)
-                        return ""
-
                 if user_input == "__interrupt__":
                     PrettyOutput.print("Task cancelled", OutputType.WARNING)
                     return "Task cancelled by user"
+                if user_input:
+                    self.prompt = user_input
+                    continue
+
+                if not user_input:
+                    PrettyOutput.section("Task Complete", OutputType.SUCCESS)
+                    return
+
+

-                self.messages.append({
-                    "role": "user",
-                    "content": user_input
-                })

             except Exception as e:
                 PrettyOutput.print(str(e), OutputType.ERROR)

     def clear_history(self):
         """Clear the conversation history, keeping only the system prompt"""
-        self.
+        self.prompt = ""
+        self.model.reset()
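The agent.py rewrite replaces the internal messages list with a single self.prompt string: the model (see jarvis/models/base.py below) is now expected to hold conversation state itself, which is why _call_model() passes one string to model.chat() and clear_history() calls model.reset(). Tool calls are pulled out of the raw model text by the new extract_tool_calls(). The following standalone sketch illustrates the <START_TOOL_CALL>/<END_TOOL_CALL> YAML convention the new prompt asks the model to follow; the "shell" tool name, the sample reply and the simplified parser are illustrative only and not part of the package.

import yaml  # PyYAML, the same parser the package code above relies on

# A model reply in the requested format: free-form text, then exactly one
# tool call wrapped in <START_TOOL_CALL>/<END_TOOL_CALL> as a small YAML document.
response = """I will list the working directory first.
<START_TOOL_CALL>
name: shell
arguments:
  command: ls -la
<END_TOOL_CALL>"""

def extract_tool_calls(content):
    # Simplified re-implementation of Agent.extract_tool_calls (error handling omitted):
    # collect the lines between the markers, parse them as YAML, keep name/arguments.
    tool_call_lines, in_tool_call = [], False
    for line in content.split('\n'):
        if '<START_TOOL_CALL>' in line:
            in_tool_call = True
        elif '<END_TOOL_CALL>' in line and in_tool_call and tool_call_lines:
            data = yaml.safe_load('\n'.join(tool_call_lines))
            if "name" in data and "arguments" in data:
                return [{"name": data["name"], "arguments": data["arguments"]}]
            return []
        elif in_tool_call:
            tool_call_lines.append(line)
    return []

print(extract_tool_calls(response))
# -> [{'name': 'shell', 'arguments': {'command': 'ls -la'}}]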
jarvis/main.py
CHANGED
@@ -12,33 +12,9 @@ sys.path.insert(0, str(Path(__file__).parent.parent))

 from jarvis.agent import Agent
 from jarvis.tools import ToolRegistry
-from jarvis.models import
+from jarvis.models import KimiModel
 from jarvis.utils import PrettyOutput, OutputType, get_multiline_input, load_env_from_file
-from jarvis.zte_llm import create_zte_llm

-# Define the supported platforms and models
-SUPPORTED_PLATFORMS = {
-    "ollama": {
-        "models": ["qwen2.5:14b", "qwq"],
-        "default": "qwen2.5:14b",
-        "allow_custom": True
-    },
-    "ddgs": {
-        "models": ["gpt-4o-mini", "claude-3-haiku", "llama-3.1-70b", "mixtral-8x7b"],
-        "default": "gpt-4o-mini",
-        "allow_custom": False
-    },
-    "zte": {
-        "models": ["NebulaBiz", "nebulacoder", "NTele-72B"],
-        "default": "NebulaBiz",
-        "allow_custom": False
-    },
-    "openai": {
-        "models": ["deepseek-chat"],
-        "default": "deepseek-chat",
-        "allow_custom": True
-    }
-}

 def load_tasks() -> dict:
     """Load tasks from .jarvis file if it exists."""
@@ -96,77 +72,26 @@ def select_task(tasks: dict) -> str:

 def main():
     """Main entry point for Jarvis."""
-
-
-
-    parser = argparse.ArgumentParser(description="Jarvis AI Assistant")
-
-    # Add the platform selection argument
-    parser.add_argument(
-        "--platform",
-        choices=list(SUPPORTED_PLATFORMS.keys()),
-        default=os.getenv("JARVIS_PLATFORM") or "ddgs",
-        help="Select the runtime platform (default: ollama)"
-    )
-
-    # Add the model selection argument
-    parser.add_argument(
-        "--model",
-        help="Select the model (default: chosen automatically per platform)"
-    )
-
-    # Add the API base URL argument
-    parser.add_argument(
-        "--api-base",
-        default=os.getenv("JARVIS_OLLAMA_API_BASE") or "http://localhost:11434",
-        help="Ollama API base URL (Ollama platform only, default: http://localhost:11434)"
-    )
-
+    # Add argument parser
+    parser = argparse.ArgumentParser(description='Jarvis AI Assistant')
+    parser.add_argument('-f', '--files', nargs='*', help='List of files to process')
     args = parser.parse_args()

-
+    load_env_from_file()

-    # Updated model validation logic
-    if args.model:
-        if (args.model not in SUPPORTED_PLATFORMS[args.platform]["models"] and
-            not SUPPORTED_PLATFORMS[args.platform]["allow_custom"]):
-            supported_models = ", ".join(SUPPORTED_PLATFORMS[args.platform]["models"])
-            PrettyOutput.print(
-                f"Error: platform {args.platform} does not support model {args.model}\n"
-                f"Supported models: {supported_models}",
-                OutputType.ERROR
-            )
-            return 1
-    else:
-        args.model = SUPPORTED_PLATFORMS[args.platform]["default"]
-
     try:
-
-        if
-
-
-
-
-            platform_name = f"Ollama ({args.model})"
-        elif args.platform == "ddgs":  # ddgs
-            model = DDGSModel(model_name=args.model)
-            platform_name = f"DuckDuckGo Search ({args.model})"
-        elif args.platform == "zte":  # zte
-            model = create_zte_llm(model_name=args.model)
-            platform_name = f"ZTE ({args.model})"
-        elif args.platform == "openai":
-            model = OpenAIModel(
-                model_name=args.model,
-                api_key=os.getenv("OPENAI_API_KEY"),
-                api_base=os.getenv("OPENAI_API_BASE")
-            )
-            platform_name = f"OpenAI ({args.model})"
+        kimi_api_key = os.getenv("KIMI_API_KEY")
+        if not kimi_api_key:
+            PrettyOutput.print("Kimi API key is not set", OutputType.ERROR)
+            return 1
+
+        model = KimiModel(kimi_api_key)

         tool_registry = ToolRegistry(model)
         agent = Agent(model, tool_registry)

         # Welcome message
-        PrettyOutput.print(f"Jarvis initialized -
+        PrettyOutput.print(f"Jarvis initialized - With Kimi", OutputType.SYSTEM)

         # Load predefined tasks
         tasks = load_tasks()
@@ -174,7 +99,7 @@ def main():
         selected_task = select_task(tasks)
         if selected_task:
             PrettyOutput.print(f"\nRunning task: {selected_task}", OutputType.INFO)
-            agent.run(selected_task)
+            agent.run(selected_task, args.files)
             return 0

         # If no predefined task was selected, enter interactive mode
@@ -183,7 +108,7 @@ def main():
             user_input = get_multiline_input("Enter your task (enter an empty line to exit):")
             if not user_input or user_input == "__interrupt__":
                 break
-            agent.run(user_input)
+            agent.run(user_input, args.files)
         except Exception as e:
             PrettyOutput.print(f"Error: {str(e)}", OutputType.ERROR)

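The net effect of the main.py changes is a much smaller CLI: the platform/model selection flags are gone, the only remaining option is -f/--files, and the backend is always Kimi, keyed by the KIMI_API_KEY environment variable. A minimal sketch of the new startup flow under those assumptions (the final print stands in for the KimiModel/ToolRegistry/Agent wiring shown in the diff):

import argparse
import os

parser = argparse.ArgumentParser(description='Jarvis AI Assistant')
parser.add_argument('-f', '--files', nargs='*', help='List of files to process')
args = parser.parse_args()

kimi_api_key = os.getenv("KIMI_API_KEY")
if not kimi_api_key:
    raise SystemExit("KIMI_API_KEY is not set")

# In the package this is where the real objects are built:
#   model = KimiModel(kimi_api_key)
#   agent = Agent(model, ToolRegistry(model))
#   agent.run(user_input, args.files)
print(f"Would start Jarvis with the Kimi backend, files={args.files}")

Any files passed via -f are forwarded to Agent.run(), which uploads them through model.upload_files() before the task starts.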
jarvis/models/__init__.py
ADDED
@@ -0,0 +1,11 @@
+from typing import Dict, List, Optional, Tuple
+from duckduckgo_search import DDGS
+import ollama
+import yaml
+import openai
+
+from ..utils import OutputType, PrettyOutput
+from .base import BaseModel
+from .kimi import KimiModel
+
+__all__ = ['BaseModel', 'KimiModel']

jarvis/models/__pycache__/__init__.cpython-313.pyc
jarvis/models/__pycache__/base.cpython-313.pyc
jarvis/models/__pycache__/kimi.cpython-313.pyc
Binary files (not shown)
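The new models package exposes only the two names listed in __all__, so downstream code (main.py above) imports them as shown below; the duckduckgo_search, ollama and openai imports at the top of __init__.py are not re-exported. A usage sketch:

from jarvis.models import BaseModel, KimiModel

# BaseModel is the abstract interface defined in jarvis/models/base.py (next section);
# KimiModel, implemented in jarvis/models/kimi.py (258 added lines, not rendered here),
# is the only concrete backend shipped in 0.1.11.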
jarvis/models/base.py
ADDED
@@ -0,0 +1,19 @@
+from abc import ABC, abstractmethod
+from typing import Dict, List
+
+class BaseModel(ABC):
+    """Base class for large language models"""
+
+    @abstractmethod
+    def chat(self, message: str) -> str:
+        """Run a chat exchange"""
+        pass
+
+    def upload_files(self, file_list: List[str]) -> List[Dict]:
+        """Upload files"""
+        pass
+
+    def reset(self):
+        """Reset the model"""
+        pass
+
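BaseModel is the contract Agent now programs against: chat() takes a single string and returns a string, while upload_files() and reset() are optional hooks that default to no-ops. Conversation history is therefore the model's responsibility, which is why Agent.clear_history() only clears the prompt and calls model.reset(). A hypothetical minimal backend, for illustration only (the real one is KimiModel in jarvis/models/kimi.py, not rendered in this diff):

from abc import ABC, abstractmethod
from typing import Dict, List

class BaseModel(ABC):
    # Restated locally so the sketch runs standalone; mirrors jarvis/models/base.py.
    @abstractmethod
    def chat(self, message: str) -> str:
        pass
    def upload_files(self, file_list: List[str]) -> List[Dict]:
        pass
    def reset(self):
        pass

class EchoModel(BaseModel):
    # Toy backend that keeps its own history, as Agent now expects the model to do.
    def __init__(self):
        self.history: List[str] = []

    def chat(self, message: str) -> str:
        self.history.append(message)
        return f"echo #{len(self.history)}: {message}"

    def reset(self):
        self.history.clear()

model = EchoModel()
print(model.chat("Analyze task X"))  # echo #1: Analyze task X
model.reset()                        # what Agent.clear_history() triggers
print(len(model.history))            # 0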