jarvis-ai-assistant 0.1.6__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of jarvis-ai-assistant might be problematic. Click here for more details.

Files changed (32) hide show
  1. jarvis/__init__.py +1 -1
  2. jarvis/__pycache__/__init__.cpython-313.pyc +0 -0
  3. jarvis/__pycache__/agent.cpython-313.pyc +0 -0
  4. jarvis/__pycache__/main.cpython-313.pyc +0 -0
  5. jarvis/__pycache__/models.cpython-313.pyc +0 -0
  6. jarvis/__pycache__/zte_llm.cpython-313.pyc +0 -0
  7. jarvis/agent.py +203 -100
  8. jarvis/main.py +22 -6
  9. jarvis/models.py +58 -66
  10. jarvis/tools/__init__.py +0 -2
  11. jarvis/tools/__pycache__/__init__.cpython-313.pyc +0 -0
  12. jarvis/tools/__pycache__/base.cpython-313.pyc +0 -0
  13. jarvis/tools/__pycache__/bing_search.cpython-313.pyc +0 -0
  14. jarvis/tools/__pycache__/file_ops.cpython-313.pyc +0 -0
  15. jarvis/tools/__pycache__/search.cpython-313.pyc +0 -0
  16. jarvis/tools/__pycache__/shell.cpython-313.pyc +0 -0
  17. jarvis/tools/__pycache__/sub_agent.cpython-313.pyc +0 -0
  18. jarvis/tools/__pycache__/webpage.cpython-313.pyc +0 -0
  19. jarvis/tools/base.py +8 -19
  20. jarvis/tools/file_ops.py +1 -1
  21. jarvis/tools/search.py +112 -16
  22. jarvis/tools/shell.py +1 -20
  23. jarvis/tools/sub_agent.py +17 -75
  24. jarvis/tools/webpage.py +12 -26
  25. jarvis/zte_llm.py +26 -27
  26. {jarvis_ai_assistant-0.1.6.dist-info → jarvis_ai_assistant-0.1.8.dist-info}/METADATA +2 -1
  27. jarvis_ai_assistant-0.1.8.dist-info/RECORD +38 -0
  28. jarvis/tools/user_input.py +0 -74
  29. jarvis_ai_assistant-0.1.6.dist-info/RECORD +0 -38
  30. {jarvis_ai_assistant-0.1.6.dist-info → jarvis_ai_assistant-0.1.8.dist-info}/WHEEL +0 -0
  31. {jarvis_ai_assistant-0.1.6.dist-info → jarvis_ai_assistant-0.1.8.dist-info}/entry_points.txt +0 -0
  32. {jarvis_ai_assistant-0.1.6.dist-info → jarvis_ai_assistant-0.1.8.dist-info}/top_level.txt +0 -0
jarvis/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
1
  """Jarvis AI Assistant"""
2
2
 
3
- __version__ = "0.1.6"
3
+ __version__ = "0.1.8"
Binary file
Binary file
Binary file
Binary file
Binary file
jarvis/agent.py CHANGED
@@ -1,6 +1,8 @@
1
1
  import json
2
2
  import subprocess
3
- from typing import Dict, Any, List, Optional
3
+ from typing import Dict, Any, List, Optional, Tuple
4
+
5
+ import yaml
4
6
  from .tools import ToolRegistry
5
7
  from .utils import PrettyOutput, OutputType, get_multiline_input
6
8
  from .models import BaseModel
@@ -9,86 +11,177 @@ import os
9
11
  from datetime import datetime
10
12
 
11
13
  class Agent:
12
- def __init__(self, model: BaseModel, tool_registry: ToolRegistry, name: str = "Jarvis"):
13
- """Initialize Agent with a model, optional tool registry and name"""
14
+ def __init__(self, model: BaseModel, tool_registry: ToolRegistry, name: str = "Jarvis", is_sub_agent: bool = False):
15
+ """Initialize Agent with a model, optional tool registry and name
16
+
17
+ Args:
18
+ model: 语言模型实例
19
+ tool_registry: 工具注册表实例
20
+ name: Agent名称,默认为"Jarvis"
21
+ is_sub_agent: 是否为子Agent,默认为False
22
+ """
14
23
  self.model = model
15
24
  self.tool_registry = tool_registry or ToolRegistry(model)
16
25
  self.name = name
26
+ self.is_sub_agent = is_sub_agent
17
27
 
18
28
  # 构建工具说明
19
- tools_prompt = "Available Tools:\n"
29
+ tools_prompt = "可用工具:\n"
20
30
  for tool in self.tool_registry.get_all_tools():
21
- tools_prompt += f"- Tool: {tool['function']['name']}\n"
22
- tools_prompt += f" Description: {tool['function']['description']}\n"
23
- tools_prompt += f" Arguments: {tool['function']['parameters']}\n"
31
+ tools_prompt += f"- 名称: {tool['name']}\n"
32
+ tools_prompt += f" 描述: {tool['description']}\n"
33
+ tools_prompt += f" 参数: {tool['parameters']}\n"
24
34
 
25
35
  self.messages = [
26
36
  {
27
37
  "role": "system",
28
- "content": f"""You are {name}, an AI assistant that follows the ReAct (Reasoning + Acting) framework to solve tasks step by step.
38
+ "content": f"""你是 {name},一个严格遵循 ReAct 框架进行逐步推理和行动的 AI 助手。
39
+
40
+ {tools_prompt}
41
+
42
+ 关键规则:
43
+ ‼️ 禁止创建虚假对话
44
+ ‼️ 禁止假设用户回应
45
+ ‼️ 禁止在没有实际用户输入时继续
46
+ ‼️ 只回应用户实际说的内容
47
+ ‼️ 每个动作后停止并等待
29
48
 
30
- FRAMEWORK:
31
- 1. Thought: Analyze the current situation and plan the next step
32
- 2. Action: Execute ONE specific tool call
33
- 3. Observation: Review the result
34
- 4. Next: Plan the next step or conclude
49
+ ReAct 框架:
50
+ 1. 思考
51
+ - 分析当前情况
52
+ - 考虑可用工具
53
+ - 规划下一步行动
54
+ - 仅基于事实
55
+ - 不做用户回应的假设
56
+ - 不想象对话内容
35
57
 
36
- FORMAT:
37
- Thought: I need to [reasoning about the current situation]...
38
- Action: I will use [tool] to [purpose]...
58
+ 2. 行动(可选)
59
+ - 不是每次回应都需要调用工具
60
+ - 如果需要更多信息,直接询问用户
61
+ - 使用工具时:
62
+ - 只使用下面列出的工具
63
+ - 每次只执行一个工具
64
+ - 工具由用户手动执行
65
+ - 必须使用有效的YAML格式:
66
+ <START_TOOL_CALL>
67
+ name: tool_name
68
+ arguments:
69
+ param1: value1 # 所有参数必须正确缩进
70
+ param2: | # 使用YAML块样式表示多行字符串
71
+ line1
72
+ line2
73
+ <END_TOOL_CALL>
74
+
75
+ 3. 观察
76
+ - 等待工具执行结果或用户回应
77
+ - 工具执行结果由用户提供
78
+ - 不要假设或想象回应
79
+ - 不要创建虚假对话
80
+ - 停止并等待实际输入
81
+
82
+ 回应格式:
83
+ 思考:我分析当前情况[具体情况]... 基于[事实],我需要[目标]...
84
+
85
+ [如果需要使用工具:]
86
+ 行动:我将使用[工具]来[具体目的]...
39
87
  <START_TOOL_CALL>
40
88
  name: tool_name
41
89
  arguments:
42
90
  param1: value1
91
+ param2: |
92
+ multiline
93
+ value
43
94
  <END_TOOL_CALL>
44
95
 
45
- After receiving result:
46
- Observation: The tool returned [analyze result]...
47
- Next: Based on this, I will [next step]...
48
-
49
- CORE RULES:
50
- 1. ONE Action Per Response
51
- - Only ONE tool call per response
52
- - Additional tool calls will be ignored
53
- - Complete current step before next
54
-
55
- 2. Clear Reasoning
56
- - Explain your thought process
57
- - Justify tool selection
58
- - Analyze results thoroughly
59
-
60
- Examples:
61
- ✓ Good Response:
62
- Thought: I need to check the content of utils.py first to understand its structure.
63
- Action: I will read the file content.
64
- <START_TOOL_CALL>
65
- name: file_operation
66
- arguments:
67
- operation: read
68
- filepath: src/utils.py
69
- <END_TOOL_CALL>
70
-
71
- ✗ Bad Response:
72
- Thought: Let's analyze the code.
73
- Action: I'll read and check everything.
74
- [Multiple or vague tool calls...]
96
+ [如果需要更多信息:]
97
+ 我需要了解更多关于[具体细节]的信息。请提供[需要的信息]。
75
98
 
76
- Remember:
77
- - Always start with "Thought:"
78
- - Use exactly ONE tool per response
79
- - Wait for results before next step
80
- - Clearly explain your reasoning
99
+ 严格规则:
100
+ ‼️ 只使用下面列出的工具
101
+ ‼️ 工具调用是可选的 - 需要时询问用户
102
+ ‼️ 每次只能调用一个工具
103
+ ‼️ 工具调用必须是有效的YAML格式
104
+ ‼️ 参数必须正确缩进
105
+ ‼️ 使用YAML块样式(|)表示多行值
106
+ ‼️ <END_TOOL_CALL>后的内容将被丢弃
107
+ ‼️ 工具由用户手动执行
108
+ ‼️ 等待用户提供工具执行结果
109
+ ‼️ 不要假设或想象用户回应
110
+ ‼️ 没有用户输入时不要继续对话
111
+ ‼️ 不要创建虚假对话
112
+ ‼️ 每个动作后停止
113
+ ‼️ 不要假设结果
114
+ ‼️ 不要假设行动
81
115
 
82
- {tools_prompt}"""
116
+ 注意事项:
117
+ - 先思考再行动
118
+ - 需要时询问用户
119
+ - 只使用列出的工具
120
+ - 一次一个工具
121
+ - 严格遵循YAML格式
122
+ - 等待用户回应
123
+ - 工具结果来自用户
124
+ - 不要假设回应
125
+ - 不要虚构对话
126
+ - 每个动作后停止
127
+ - 只在有实际用户输入时继续
128
+ """
83
129
  }
84
130
  ]
85
131
 
86
- def _call_model(self, messages: List[Dict], use_tools: bool = True) -> Dict:
132
@staticmethod
def extract_tool_calls(content: str) -> Tuple[str, List[Dict]]:
    """Extract the first tool call embedded in model output.

    Scans `content` line by line for a <START_TOOL_CALL>/<END_TOOL_CALL>
    section and parses the enclosed text as YAML.

    Args:
        content: raw model output, possibly containing one tool call.

    Returns:
        A tuple of (text, tool_calls). `text` is the accumulated content
        up to and including the tool-call section (every line is appended
        before the markers are checked, so the markers and YAML body are
        included). `tool_calls` is a one-element list holding the parsed
        call, or an empty list when no valid call is found.

    Raises:
        Exception: when a tool-call section is present but is not valid
            YAML or is missing the required `name`/`arguments` fields.
    """
    tool_call_lines = []
    content_lines = []
    in_tool_call = False

    for line in content.split('\n'):
        # Accumulate every line, including the tool-call section itself.
        content_lines.append(line)

        if '<START_TOOL_CALL>' in line:
            in_tool_call = True
            continue
        elif '<END_TOOL_CALL>' in line:
            if in_tool_call and tool_call_lines:
                try:
                    tool_call_data = yaml.safe_load('\n'.join(tool_call_lines))
                except yaml.YAMLError as e:
                    PrettyOutput.print(f"YAML解析错误: {str(e)}", OutputType.ERROR)
                    # BUG FIX: the original raised bare strings here
                    # (`raise 'YAML解析错误'`), which is itself a TypeError
                    # in Python 3 — exceptions must derive BaseException.
                    raise Exception('YAML解析错误') from e

                # Validate required fields OUTSIDE the try block so this
                # error is not swallowed and re-wrapped by a generic
                # handler, as happened in the original.
                if isinstance(tool_call_data, dict) and "name" in tool_call_data and "arguments" in tool_call_data:
                    return '\n'.join(content_lines), [{
                        "name": tool_call_data["name"],
                        "arguments": tool_call_data["arguments"]
                    }]
                PrettyOutput.print("工具调用缺少必要字段", OutputType.ERROR)
                raise Exception('工具调用缺少必要字段')
            in_tool_call = False
            continue

        if in_tool_call:
            tool_call_lines.append(line)

    # No tool call found: return the full content and an empty call list.
    return '\n'.join(content_lines), []
179
+
180
def _call_model(self, messages: List[Dict]) -> str:
    """Forward `messages` to the underlying model and return its reply.

    Args:
        messages: conversation history as chat-message dicts.

    Returns:
        The model's response text. (Annotation fixed from `Dict` to
        `str` for consistency: BaseModel.chat returns str in this
        release, and callers pass the result straight to
        extract_tool_calls.)

    Raises:
        Exception: wraps any model failure with this agent's name.
    """
    try:
        return self.model.chat(
            messages=messages,
        )
    except Exception as e:
        # Chain the cause so the original traceback is not lost.
        raise Exception(f"{self.name}: 模型调用失败: {str(e)}") from e
@@ -109,33 +202,38 @@ Remember:
109
202
  try:
110
203
  # 显示思考状态
111
204
  PrettyOutput.print("分析任务...", OutputType.PROGRESS)
112
- response = self._call_model(self.messages)
113
- current_response = response
205
+
206
+ current_response = self._call_model(self.messages)
207
+
208
+ try:
209
+ result = Agent.extract_tool_calls(current_response)
210
+ except Exception as e:
211
+ PrettyOutput.print(f"工具调用错误: {str(e)}", OutputType.ERROR)
212
+ self.messages.append({
213
+ "role": "user",
214
+ "content": f"工具调用错误: {str(e)}"
215
+ })
216
+ continue
114
217
 
115
- # 流式输出已经在model中处理,这里添加换行
116
- PrettyOutput.print_stream_end()
117
218
 
118
219
  self.messages.append({
119
220
  "role": "assistant",
120
- "content": response["message"].get("content", ""),
121
- "tool_calls": current_response["message"]["tool_calls"]
221
+ "content": result[0]
122
222
  })
223
+
123
224
 
124
- if len(current_response["message"]["tool_calls"]) > 0:
125
- if current_response["message"].get("content"):
126
- PrettyOutput.print(current_response["message"]["content"], OutputType.SYSTEM)
127
-
225
+ if len(result[1]) > 0:
128
226
  try:
129
227
  # 显示工具调用
130
228
  PrettyOutput.print("执行工具调用...", OutputType.PROGRESS)
131
- tool_result = self.tool_registry.handle_tool_calls(current_response["message"]["tool_calls"])
229
+ tool_result = self.tool_registry.handle_tool_calls(result[1])
132
230
  PrettyOutput.print(tool_result, OutputType.RESULT)
133
231
  except Exception as e:
134
232
  PrettyOutput.print(str(e), OutputType.ERROR)
135
233
  tool_result = f"Tool call failed: {str(e)}"
136
234
 
137
235
  self.messages.append({
138
- "role": "tool",
236
+ "role": "user",
139
237
  "content": tool_result
140
238
  })
141
239
  continue
@@ -143,40 +241,45 @@ Remember:
143
241
  # 获取用户输入
144
242
  user_input = get_multiline_input(f"{self.name}: 您可以继续输入,或输入空行结束当前任务")
145
243
  if not user_input:
146
- PrettyOutput.print("生成任务总结...", OutputType.PROGRESS)
147
-
148
- # 生成任务总结
149
- summary_prompt = {
150
- "role": "user",
151
- "content": """The task has been completed. Based on the previous analysis and execution results, provide a task summary including:
152
-
153
- 1. Key Information:
154
- - Essential findings from analysis
155
- - Important results from tool executions
156
- - Critical data discovered
157
-
158
- 2. Task Results:
159
- - Final outcome
160
- - Actual achievements
161
- - Concrete results
162
-
163
- Focus only on facts and actual results. Be direct and concise."""
164
- }
165
-
166
- while True:
167
- try:
168
- summary_response = self._call_model(self.messages + [summary_prompt], use_tools=False)
169
- summary = summary_response["message"].get("content", "")
170
-
171
- # 显示任务总结
172
- PrettyOutput.section("任务总结", OutputType.SUCCESS)
173
- PrettyOutput.print(summary, OutputType.SYSTEM)
174
- PrettyOutput.section("任务完成", OutputType.SUCCESS)
175
-
176
- return summary
177
-
178
- except Exception as e:
179
- PrettyOutput.print(str(e), OutputType.ERROR)
244
+ # 只有子Agent才需要生成任务总结
245
+ if self.is_sub_agent:
246
+ PrettyOutput.print("生成任务总结...", OutputType.PROGRESS)
247
+
248
+ # 生成任务总结
249
+ summary_prompt = {
250
+ "role": "user",
251
+ "content": """任务已完成。请根据之前的分析和执行结果,提供一个简明的任务总结,包括:
252
+
253
+ 1. 关键发现:
254
+ - 分析过程中的重要发现
255
+ - 工具执行的关键结果
256
+ - 发现的重要数据
257
+
258
+ 2. 执行成果:
259
+ - 任务完成情况
260
+ - 具体实现结果
261
+ - 达成的目标
262
+
263
+ 请直接描述事实和实际结果,保持简洁明了。"""
264
+ }
265
+
266
+ while True:
267
+ try:
268
+ summary = self._call_model(self.messages + [summary_prompt])
269
+
270
+ # 显示任务总结
271
+ PrettyOutput.section("任务总结", OutputType.SUCCESS)
272
+ PrettyOutput.print(summary, OutputType.SYSTEM)
273
+ PrettyOutput.section("任务完成", OutputType.SUCCESS)
274
+
275
+ return summary
276
+
277
+ except Exception as e:
278
+ PrettyOutput.print(str(e), OutputType.ERROR)
279
+ else:
280
+ # 顶层Agent直接返回空字符串
281
+ PrettyOutput.section("任务完成", OutputType.SUCCESS)
282
+ return ""
180
283
 
181
284
  if user_input == "__interrupt__":
182
285
  PrettyOutput.print("任务已取消", OutputType.WARNING)
jarvis/main.py CHANGED
@@ -12,7 +12,7 @@ sys.path.insert(0, str(Path(__file__).parent.parent))
12
12
 
13
13
  from jarvis.agent import Agent
14
14
  from jarvis.tools import ToolRegistry
15
- from jarvis.models import DDGSModel, OllamaModel
15
+ from jarvis.models import DDGSModel, OllamaModel, OpenAIModel
16
16
  from jarvis.utils import PrettyOutput, OutputType, get_multiline_input, load_env_from_file
17
17
  from jarvis.zte_llm import create_zte_llm
18
18
 
@@ -20,15 +20,23 @@ from jarvis.zte_llm import create_zte_llm
20
20
  SUPPORTED_PLATFORMS = {
21
21
  "ollama": {
22
22
  "models": ["qwen2.5:14b", "qwq"],
23
- "default": "qwen2.5:14b"
23
+ "default": "qwen2.5:14b",
24
+ "allow_custom": True
24
25
  },
25
26
  "ddgs": {
26
27
  "models": ["gpt-4o-mini", "claude-3-haiku", "llama-3.1-70b", "mixtral-8x7b"],
27
- "default": "gpt-4o-mini"
28
+ "default": "gpt-4o-mini",
29
+ "allow_custom": False
28
30
  },
29
31
  "zte": {
30
32
  "models": ["NebulaBiz", "nebulacoder", "NTele-72B"],
31
- "default": "NebulaBiz"
33
+ "default": "NebulaBiz",
34
+ "allow_custom": False
35
+ },
36
+ "openai": {
37
+ "models": ["deepseek-chat"],
38
+ "default": "deepseek-chat",
39
+ "allow_custom": True
32
40
  }
33
41
  }
34
42
 
@@ -118,9 +126,10 @@ def main():
118
126
 
119
127
  args.model = args.model or os.getenv("JARVIS_MODEL")
120
128
 
121
- # 验证并设置默认模型
129
+ # 修改模型验证逻辑
122
130
  if args.model:
123
- if args.model not in SUPPORTED_PLATFORMS[args.platform]["models"]:
131
+ if (args.model not in SUPPORTED_PLATFORMS[args.platform]["models"] and
132
+ not SUPPORTED_PLATFORMS[args.platform]["allow_custom"]):
124
133
  supported_models = ", ".join(SUPPORTED_PLATFORMS[args.platform]["models"])
125
134
  PrettyOutput.print(
126
135
  f"错误: 平台 {args.platform} 不支持模型 {args.model}\n"
@@ -145,6 +154,13 @@ def main():
145
154
  elif args.platform == "zte": # zte
146
155
  model = create_zte_llm(model_name=args.model)
147
156
  platform_name = f"ZTE ({args.model})"
157
+ elif args.platform == "openai":
158
+ model = OpenAIModel(
159
+ model_name=args.model,
160
+ api_key=os.getenv("OPENAI_API_KEY"),
161
+ api_base=os.getenv("OPENAI_API_BASE")
162
+ )
163
+ platform_name = f"OpenAI ({args.model})"
148
164
 
149
165
  tool_registry = ToolRegistry(model)
150
166
  agent = Agent(model, tool_registry)
jarvis/models.py CHANGED
@@ -1,10 +1,11 @@
1
1
  import re
2
2
  import time
3
- from typing import Dict, List, Optional
3
+ from typing import Dict, List, Optional, Tuple
4
4
  from duckduckgo_search import DDGS
5
5
  import ollama
6
6
  from abc import ABC, abstractmethod
7
7
  import yaml
8
+ import openai
8
9
 
9
10
  from .utils import OutputType, PrettyOutput
10
11
 
@@ -12,53 +13,10 @@ class BaseModel(ABC):
12
13
  """大语言模型基类"""
13
14
 
14
15
  @abstractmethod
15
- def chat(self, messages: List[Dict], tools: Optional[List[Dict]] = None) -> Dict:
16
+ def chat(self, messages: List[Dict]) -> str:
16
17
  """执行对话"""
17
18
  pass
18
19
 
19
- @staticmethod
20
- def extract_tool_calls(content: str) -> List[Dict]:
21
- """从内容中提取工具调用,只返回第一个有效的工具调用"""
22
- # 分割内容为行
23
- lines = content.split('\n')
24
- tool_call_lines = []
25
- in_tool_call = False
26
-
27
- # 逐行处理
28
- for line in lines:
29
- if not line:
30
- continue
31
-
32
- if line == '<START_TOOL_CALL>':
33
- tool_call_lines = []
34
- in_tool_call = True
35
- continue
36
- elif line == '<END_TOOL_CALL>':
37
- if in_tool_call and tool_call_lines:
38
- try:
39
- # 解析工具调用内容
40
- tool_call_text = '\n'.join(tool_call_lines)
41
- tool_call_data = yaml.safe_load(tool_call_text)
42
-
43
- # 验证必要的字段
44
- if "name" in tool_call_data and "arguments" in tool_call_data:
45
- # 只返回第一个有效的工具调用
46
- return [{
47
- "function": {
48
- "name": tool_call_data["name"],
49
- "arguments": tool_call_data["arguments"]
50
- }
51
- }]
52
- except yaml.YAMLError:
53
- pass # 跳过无效的YAML
54
- except Exception:
55
- pass # 跳过其他错误
56
- in_tool_call = False
57
- elif in_tool_call:
58
- tool_call_lines.append(line)
59
-
60
- return [] # 如果没有找到有效的工具调用,返回空列表
61
-
62
20
 
63
21
  class DDGSModel(BaseModel):
64
22
  def __init__(self, model_name: str = "gpt-4o-mini"):
@@ -70,24 +28,18 @@ class DDGSModel(BaseModel):
70
28
  """
71
29
  self.model_name = model_name
72
30
 
73
def __make_prompt(self, messages: List[Dict]) -> str:
    """Flatten chat messages into a single "[role]: content" prompt string."""
    # str.join avoids quadratic string concatenation in a loop.
    return "".join(
        f"[{message['role']}]: {message['content']}\n"
        for message in messages
    )

def chat(self, messages: List[Dict]) -> str:
    """Send the flattened conversation to DuckDuckGo AI chat.

    Args:
        messages: conversation history as chat-message dicts.

    Returns:
        The model's reply text (also echoed via PrettyOutput).
    """
    ddgs = DDGS()
    prompt = self.__make_prompt(messages)
    # BUG FIX: pass the configured model — the original called
    # ddgs.chat(prompt) and silently ignored self.model_name, so the
    # user's model selection on the ddgs platform had no effect.
    content = ddgs.chat(prompt, model=self.model_name)
    PrettyOutput.print(content, OutputType.SYSTEM)
    return content
91
43
 
92
44
 
93
45
  class OllamaModel(BaseModel):
@@ -98,7 +50,7 @@ class OllamaModel(BaseModel):
98
50
  self.api_base = api_base
99
51
  self.client = ollama.Client(host=api_base)
100
52
 
101
- def chat(self, messages: List[Dict], tools: Optional[List[Dict]] = None) -> Dict:
53
+ def chat(self, messages: List[Dict]) -> str:
102
54
  """调用Ollama API获取响应"""
103
55
  try:
104
56
  # 使用流式调用
@@ -116,15 +68,55 @@ class OllamaModel(BaseModel):
116
68
  # 实时打印内容
117
69
  PrettyOutput.print_stream(chunk.message.content, OutputType.SYSTEM)
118
70
 
71
+ PrettyOutput.print_stream_end()
72
+
73
+ # 合并完整内容
74
+ return "".join(content_parts)
75
+
76
+ except Exception as e:
77
+ raise Exception(f"Ollama API调用失败: {str(e)}")
78
+
79
+
80
class OpenAIModel(BaseModel):
    """OpenAI-compatible chat model implementation (streams responses)."""

    def __init__(self, model_name: str = "deepseek-chat", api_key: Optional[str] = None, api_base: Optional[str] = None):
        """Initialize the OpenAI-compatible model.

        Args:
            model_name: model identifier, defaults to "deepseek-chat".
            api_key: API key; when None the openai library falls back to
                the OPENAI_API_KEY environment variable.
            api_base: optional base URL for custom/compatible endpoints
                (e.g. DeepSeek).
        """
        self.model_name = model_name
        # NOTE(review): this mutates module-level openai state, so every
        # OpenAIModel instance shares one key/base URL — presumably fine
        # since the CLI builds a single model; confirm before reusing
        # this class with multiple endpoints in one process.
        if api_key:
            openai.api_key = api_key
        if api_base:
            openai.base_url = api_base

    def chat(self, messages: List[Dict]) -> str:
        """Stream a chat completion and return the concatenated text.

        Args:
            messages: conversation history as chat-message dicts.

        Returns:
            The full response text assembled from the stream.

        Raises:
            Exception: wraps any OpenAI API failure; the original
                exception is chained as __cause__.
        """
        try:
            # Stream so output can be shown incrementally.
            stream = openai.chat.completions.create(
                model=self.model_name,
                messages=messages,
                stream=True
            )

            content_parts = []
            for chunk in stream:
                # BUG FIX: guard against chunks with an empty `choices`
                # list (e.g. trailing usage-only chunks); the original
                # `chunk.choices[0]` would raise IndexError on those.
                if chunk.choices and chunk.choices[0].delta.content:
                    part = chunk.choices[0].delta.content
                    content_parts.append(part)
                    # Print each delta as it arrives for streaming UX.
                    PrettyOutput.print_stream(part, OutputType.SYSTEM)

            PrettyOutput.print_stream_end()

            # Assemble the complete response.
            return "".join(content_parts)

        except Exception as e:
            raise Exception(f"OpenAI API调用失败: {str(e)}") from e
jarvis/tools/__init__.py CHANGED
@@ -3,7 +3,6 @@ from .file_ops import FileOperationTool
3
3
  from .search import SearchTool
4
4
  from .shell import ShellTool
5
5
  from .webpage import WebpageTool
6
- from .user_input import UserInputTool
7
6
 
8
7
  __all__ = [
9
8
  'Tool',
@@ -12,5 +11,4 @@ __all__ = [
12
11
  'SearchTool',
13
12
  'ShellTool',
14
13
  'WebpageTool',
15
- 'UserInputTool',
16
14
  ]