jarvis-ai-assistant 0.1.3__tar.gz → 0.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. {jarvis_ai_assistant-0.1.3/src/jarvis_ai_assistant.egg-info → jarvis_ai_assistant-0.1.6}/PKG-INFO +6 -4
  2. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/pyproject.toml +3 -9
  3. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/setup.py +2 -3
  4. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/__init__.py +1 -1
  5. jarvis_ai_assistant-0.1.6/src/jarvis/__pycache__/__init__.cpython-313.pyc +0 -0
  6. jarvis_ai_assistant-0.1.6/src/jarvis/__pycache__/agent.cpython-313.pyc +0 -0
  7. jarvis_ai_assistant-0.1.6/src/jarvis/__pycache__/main.cpython-313.pyc +0 -0
  8. jarvis_ai_assistant-0.1.6/src/jarvis/__pycache__/models.cpython-313.pyc +0 -0
  9. jarvis_ai_assistant-0.1.6/src/jarvis/__pycache__/utils.cpython-313.pyc +0 -0
  10. jarvis_ai_assistant-0.1.6/src/jarvis/__pycache__/zte_llm.cpython-313.pyc +0 -0
  11. jarvis_ai_assistant-0.1.6/src/jarvis/agent.py +195 -0
  12. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/main.py +44 -24
  13. jarvis_ai_assistant-0.1.6/src/jarvis/models.py +130 -0
  14. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__init__.py +3 -9
  15. jarvis_ai_assistant-0.1.6/src/jarvis/tools/__pycache__/__init__.cpython-313.pyc +0 -0
  16. jarvis_ai_assistant-0.1.6/src/jarvis/tools/__pycache__/base.cpython-313.pyc +0 -0
  17. jarvis_ai_assistant-0.1.6/src/jarvis/tools/__pycache__/shell.cpython-313.pyc +0 -0
  18. jarvis_ai_assistant-0.1.6/src/jarvis/tools/__pycache__/sub_agent.cpython-313.pyc +0 -0
  19. jarvis_ai_assistant-0.1.6/src/jarvis/tools/__pycache__/user_input.cpython-313.pyc +0 -0
  20. jarvis_ai_assistant-0.1.6/src/jarvis/tools/base.py +130 -0
  21. jarvis_ai_assistant-0.1.6/src/jarvis/tools/shell.py +95 -0
  22. jarvis_ai_assistant-0.1.6/src/jarvis/tools/sub_agent.py +141 -0
  23. jarvis_ai_assistant-0.1.6/src/jarvis/tools/user_input.py +74 -0
  24. jarvis_ai_assistant-0.1.6/src/jarvis/utils.py +164 -0
  25. jarvis_ai_assistant-0.1.6/src/jarvis/zte_llm.py +136 -0
  26. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6/src/jarvis_ai_assistant.egg-info}/PKG-INFO +6 -4
  27. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis_ai_assistant.egg-info/SOURCES.txt +7 -5
  28. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis_ai_assistant.egg-info/requires.txt +1 -2
  29. jarvis_ai_assistant-0.1.3/src/jarvis/.jarvis +0 -1
  30. jarvis_ai_assistant-0.1.3/src/jarvis/__pycache__/__init__.cpython-313.pyc +0 -0
  31. jarvis_ai_assistant-0.1.3/src/jarvis/__pycache__/agent.cpython-313.pyc +0 -0
  32. jarvis_ai_assistant-0.1.3/src/jarvis/__pycache__/models.cpython-313.pyc +0 -0
  33. jarvis_ai_assistant-0.1.3/src/jarvis/__pycache__/utils.cpython-313.pyc +0 -0
  34. jarvis_ai_assistant-0.1.3/src/jarvis/agent.py +0 -100
  35. jarvis_ai_assistant-0.1.3/src/jarvis/models.py +0 -112
  36. jarvis_ai_assistant-0.1.3/src/jarvis/tools/__pycache__/__init__.cpython-313.pyc +0 -0
  37. jarvis_ai_assistant-0.1.3/src/jarvis/tools/__pycache__/base.cpython-313.pyc +0 -0
  38. jarvis_ai_assistant-0.1.3/src/jarvis/tools/__pycache__/shell.cpython-313.pyc +0 -0
  39. jarvis_ai_assistant-0.1.3/src/jarvis/tools/base.py +0 -211
  40. jarvis_ai_assistant-0.1.3/src/jarvis/tools/python_script.py +0 -150
  41. jarvis_ai_assistant-0.1.3/src/jarvis/tools/rag.py +0 -154
  42. jarvis_ai_assistant-0.1.3/src/jarvis/tools/shell.py +0 -81
  43. jarvis_ai_assistant-0.1.3/src/jarvis/tools/user_confirmation.py +0 -58
  44. jarvis_ai_assistant-0.1.3/src/jarvis/tools/user_interaction.py +0 -86
  45. jarvis_ai_assistant-0.1.3/src/jarvis/utils.py +0 -105
  46. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/MANIFEST.in +0 -0
  47. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/README.md +0 -0
  48. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/setup.cfg +0 -0
  49. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/__pycache__/tools.cpython-313.pyc +0 -0
  50. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__pycache__/file_ops.cpython-313.pyc +0 -0
  51. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__pycache__/python_script.cpython-313.pyc +0 -0
  52. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__pycache__/rag.cpython-313.pyc +0 -0
  53. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__pycache__/search.cpython-313.pyc +0 -0
  54. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__pycache__/user_confirmation.cpython-313.pyc +0 -0
  55. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__pycache__/user_interaction.cpython-313.pyc +0 -0
  56. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/__pycache__/webpage.cpython-313.pyc +0 -0
  57. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/file_ops.py +0 -0
  58. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/search.py +0 -0
  59. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis/tools/webpage.py +0 -0
  60. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis_ai_assistant.egg-info/dependency_links.txt +0 -0
  61. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis_ai_assistant.egg-info/entry_points.txt +0 -0
  62. {jarvis_ai_assistant-0.1.3 → jarvis_ai_assistant-0.1.6}/src/jarvis_ai_assistant.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.2
2
2
  Name: jarvis-ai-assistant
3
- Version: 0.1.3
3
+ Version: 0.1.6
4
4
  Summary: Jarvis: An AI assistant that uses tools to interact with the system
5
5
  Home-page: https://github.com/skyfireitdiy/Jarvis
6
6
  Author: skyfire
@@ -21,13 +21,15 @@ Requires-Dist: beautifulsoup4>=4.9.3
21
21
  Requires-Dist: duckduckgo-search>=3.0.0
22
22
  Requires-Dist: pyyaml>=5.1
23
23
  Requires-Dist: ollama>=0.1.6
24
- Requires-Dist: sentence-transformers>=2.5.1
25
- Requires-Dist: chromadb>=0.4.24
24
+ Requires-Dist: colorama>=0.4.6
26
25
  Provides-Extra: dev
27
26
  Requires-Dist: pytest; extra == "dev"
28
27
  Requires-Dist: black; extra == "dev"
29
28
  Requires-Dist: isort; extra == "dev"
30
29
  Requires-Dist: mypy; extra == "dev"
30
+ Dynamic: author
31
+ Dynamic: home-page
32
+ Dynamic: requires-python
31
33
 
32
34
  <div align="center">
33
35
 
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "jarvis-ai-assistant"
7
- version = "0.1.3"
7
+ version = "0.1.6"
8
8
  description = "Jarvis: An AI assistant that uses tools to interact with the system"
9
9
  readme = "README.md"
10
10
  authors = [{ name = "Your Name", email = "your.email@example.com" }]
@@ -25,18 +25,12 @@ dependencies = [
25
25
  "duckduckgo-search>=3.0.0",
26
26
  "pyyaml>=5.1",
27
27
  "ollama>=0.1.6",
28
- "sentence-transformers>=2.5.1",
29
- "chromadb>=0.4.24",
28
+ "colorama>=0.4.6",
30
29
  ]
31
30
  requires-python = ">=3.8"
32
31
 
33
32
  [project.optional-dependencies]
34
- dev = [
35
- "pytest",
36
- "black",
37
- "isort",
38
- "mypy",
39
- ]
33
+ dev = ["pytest", "black", "isort", "mypy"]
40
34
 
41
35
  [project.urls]
42
36
  Homepage = "https://github.com/skyfireitdiy/Jarvis"
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name="jarvis-ai-assistant",
5
- version="0.1.3",
5
+ version="0.1.6",
6
6
  author="skyfire",
7
7
  author_email="skyfireitdiy@hotmail.com",
8
8
  description="An AI assistant that uses various tools to interact with the system",
@@ -18,8 +18,7 @@ setup(
18
18
  "duckduckgo-search>=3.0.0",
19
19
  "pyyaml>=5.1",
20
20
  "ollama>=0.1.6",
21
- "sentence-transformers>=2.5.1",
22
- "chromadb>=0.4.24",
21
+ "colorama>=0.4.6",
23
22
  ],
24
23
  entry_points={
25
24
  "console_scripts": [
@@ -1,3 +1,3 @@
1
1
  """Jarvis AI Assistant"""
2
2
 
3
- __version__ = "0.1.3"
3
+ __version__ = "0.1.6"
@@ -0,0 +1,195 @@
1
+ import json
2
+ import subprocess
3
+ from typing import Dict, Any, List, Optional
4
+ from .tools import ToolRegistry
5
+ from .utils import PrettyOutput, OutputType, get_multiline_input
6
+ from .models import BaseModel
7
+ import re
8
+ import os
9
+ from datetime import datetime
10
+
11
class Agent:
    """ReAct-style agent that drives a chat model through reason/act/observe loops.

    A system prompt describes the ReAct framework and the registered tools.
    The agent then repeats: call the model, execute at most one tool call per
    response, feed the result back, and finally produce a task summary once
    the user stops providing input.
    """

    def __init__(self, model: BaseModel, tool_registry: ToolRegistry = None, name: str = "Jarvis"):
        """Initialize Agent with a model, optional tool registry and name.

        Args:
            model: Chat model used for all completions.
            tool_registry: Registry of callable tools. The docstring always
                advertised this as optional, so it now defaults to None and a
                registry is built around ``model`` when omitted.
            name: Display name used in prompts and console output.
        """
        self.model = model
        self.tool_registry = tool_registry or ToolRegistry(model)
        self.name = name

        # Render a plain-text description of every registered tool for the
        # system prompt (the model sees tools only through this text).
        tools_prompt = "Available Tools:\n"
        for tool in self.tool_registry.get_all_tools():
            tools_prompt += f"- Tool: {tool['function']['name']}\n"
            tools_prompt += f"  Description: {tool['function']['description']}\n"
            tools_prompt += f"  Arguments: {tool['function']['parameters']}\n"

        self.messages = [
            {
                "role": "system",
                "content": f"""You are {name}, an AI assistant that follows the ReAct (Reasoning + Acting) framework to solve tasks step by step.

FRAMEWORK:
1. Thought: Analyze the current situation and plan the next step
2. Action: Execute ONE specific tool call
3. Observation: Review the result
4. Next: Plan the next step or conclude

FORMAT:
Thought: I need to [reasoning about the current situation]...
Action: I will use [tool] to [purpose]...
<START_TOOL_CALL>
name: tool_name
arguments:
  param1: value1
<END_TOOL_CALL>

After receiving result:
Observation: The tool returned [analyze result]...
Next: Based on this, I will [next step]...

CORE RULES:
1. ONE Action Per Response
   - Only ONE tool call per response
   - Additional tool calls will be ignored
   - Complete current step before next

2. Clear Reasoning
   - Explain your thought process
   - Justify tool selection
   - Analyze results thoroughly

Examples:
✓ Good Response:
Thought: I need to check the content of utils.py first to understand its structure.
Action: I will read the file content.
<START_TOOL_CALL>
name: file_operation
arguments:
  operation: read
  filepath: src/utils.py
<END_TOOL_CALL>

✗ Bad Response:
Thought: Let's analyze the code.
Action: I'll read and check everything.
[Multiple or vague tool calls...]

Remember:
- Always start with "Thought:"
- Use exactly ONE tool per response
- Wait for results before next step
- Clearly explain your reasoning

{tools_prompt}"""
            }
        ]

    def _call_model(self, messages: List[Dict], use_tools: bool = True) -> Dict:
        """Call the underlying model, optionally advertising the registered tools.

        Raises:
            Exception: wraps any model failure, chaining the original cause.
        """
        try:
            return self.model.chat(
                messages=messages,
                tools=self.tool_registry.get_all_tools() if use_tools else []
            )
        except Exception as e:
            # Chain the cause so the underlying failure stays visible.
            raise Exception(f"{self.name}: 模型调用失败: {str(e)}") from e

    def run(self, user_input: str) -> str:
        """Process user input and drive the task to completion.

        Returns the generated task summary, or a cancellation notice when the
        user interrupts.
        """
        self.clear_history()

        # Announce the start of a new task.
        PrettyOutput.section(f"开始新任务: {self.name}", OutputType.PLANNING)

        self.messages.append({
            "role": "user",
            "content": user_input
        })

        while True:
            try:
                # Show progress while waiting on the model.
                PrettyOutput.print("分析任务...", OutputType.PROGRESS)
                response = self._call_model(self.messages)

                # Streaming output is handled inside the model; just end the line.
                PrettyOutput.print_stream_end()

                message = response["message"]
                # Use .get so a model response without "tool_calls" cannot
                # raise KeyError (the original indexed it directly).
                tool_calls = message.get("tool_calls", [])

                self.messages.append({
                    "role": "assistant",
                    "content": message.get("content", ""),
                    "tool_calls": tool_calls
                })

                if tool_calls:
                    if message.get("content"):
                        PrettyOutput.print(message["content"], OutputType.SYSTEM)

                    try:
                        # Execute exactly the tool calls the model requested.
                        PrettyOutput.print("执行工具调用...", OutputType.PROGRESS)
                        tool_result = self.tool_registry.handle_tool_calls(tool_calls)
                        PrettyOutput.print(tool_result, OutputType.RESULT)
                    except Exception as e:
                        PrettyOutput.print(str(e), OutputType.ERROR)
                        tool_result = f"Tool call failed: {str(e)}"

                    self.messages.append({
                        "role": "tool",
                        "content": tool_result
                    })
                    continue

                # No tool call: hand control back to the user.
                user_input = get_multiline_input(f"{self.name}: 您可以继续输入,或输入空行结束当前任务")
                if not user_input:
                    PrettyOutput.print("生成任务总结...", OutputType.PROGRESS)

                    # Ask the model to summarize the finished task.
                    summary_prompt = {
                        "role": "user",
                        "content": """The task has been completed. Based on the previous analysis and execution results, provide a task summary including:

1. Key Information:
   - Essential findings from analysis
   - Important results from tool executions
   - Critical data discovered

2. Task Results:
   - Final outcome
   - Actual achievements
   - Concrete results

Focus only on facts and actual results. Be direct and concise."""
                    }

                    while True:
                        try:
                            summary_response = self._call_model(self.messages + [summary_prompt], use_tools=False)
                            summary = summary_response["message"].get("content", "")

                            # Display the task summary.
                            PrettyOutput.section("任务总结", OutputType.SUCCESS)
                            PrettyOutput.print(summary, OutputType.SYSTEM)
                            PrettyOutput.section("任务完成", OutputType.SUCCESS)

                            return summary

                        except Exception as e:
                            # NOTE(review): retries indefinitely on persistent
                            # model failure — confirm a retry cap is not wanted.
                            PrettyOutput.print(str(e), OutputType.ERROR)

                if user_input == "__interrupt__":
                    PrettyOutput.print("任务已取消", OutputType.WARNING)
                    return "Task cancelled by user"

                self.messages.append({
                    "role": "user",
                    "content": user_input
                })

            except Exception as e:
                PrettyOutput.print(str(e), OutputType.ERROR)

    def clear_history(self):
        """Reset the conversation, keeping only the system prompt."""
        self.messages = [self.messages[0]]
@@ -13,46 +13,60 @@ sys.path.insert(0, str(Path(__file__).parent.parent))
13
13
  from jarvis.agent import Agent
14
14
  from jarvis.tools import ToolRegistry
15
15
  from jarvis.models import DDGSModel, OllamaModel
16
- from jarvis.utils import PrettyOutput, OutputType, get_multiline_input
16
+ from jarvis.utils import PrettyOutput, OutputType, get_multiline_input, load_env_from_file
17
+ from jarvis.zte_llm import create_zte_llm
17
18
 
18
19
  # 定义支持的平台和模型
19
20
  SUPPORTED_PLATFORMS = {
20
21
  "ollama": {
21
- "models": ["llama3.2", "qwen2.5:14b"],
22
+ "models": ["qwen2.5:14b", "qwq"],
22
23
  "default": "qwen2.5:14b"
23
24
  },
24
25
  "ddgs": {
25
26
  "models": ["gpt-4o-mini", "claude-3-haiku", "llama-3.1-70b", "mixtral-8x7b"],
26
27
  "default": "gpt-4o-mini"
28
+ },
29
+ "zte": {
30
+ "models": ["NebulaBiz", "nebulacoder", "NTele-72B"],
31
+ "default": "NebulaBiz"
27
32
  }
28
33
  }
29
34
 
30
def load_tasks() -> dict:
    """Read predefined tasks from the local ``.jarvis`` file.

    Returns a mapping of task name -> task description. An empty dict is
    returned when the file is absent, unreadable, or not a YAML mapping.
    """
    if not os.path.exists(".jarvis"):
        return {}

    try:
        with open(".jarvis", "r", encoding="utf-8") as f:
            raw = yaml.safe_load(f)
    except Exception as e:
        PrettyOutput.print(f"Error loading .jarvis file: {str(e)}", OutputType.ERROR)
        return {}

    if not isinstance(raw, dict):
        PrettyOutput.print("Warning: .jarvis file should contain a dictionary of task_name: task_description", OutputType.ERROR)
        return {}

    # Coerce names and descriptions to strings, dropping empty descriptions.
    return {str(name): str(desc) for name, desc in raw.items() if desc}
47
58
 
48
- def select_task(tasks: list) -> str:
49
- """Let user select a task from the list or skip."""
59
+ def select_task(tasks: dict) -> str:
60
+ """Let user select a task from the list or skip. Returns task description if selected."""
50
61
  if not tasks:
51
62
  return ""
52
63
 
53
- PrettyOutput.print("\nFound predefined tasks:", OutputType.INFO)
54
- for i, task in enumerate(tasks, 1):
55
- PrettyOutput.print(f"[{i}] {task}", OutputType.INFO)
64
+ # Convert tasks to list for ordered display
65
+ task_names = list(tasks.keys())
66
+
67
+ PrettyOutput.print("\nAvailable tasks:", OutputType.INFO)
68
+ for i, name in enumerate(task_names, 1):
69
+ PrettyOutput.print(f"[{i}] {name}", OutputType.INFO)
56
70
  PrettyOutput.print("[0] Skip predefined tasks", OutputType.INFO)
57
71
 
58
72
  while True:
@@ -64,8 +78,9 @@ def select_task(tasks: list) -> str:
64
78
  choice = int(choice)
65
79
  if choice == 0:
66
80
  return ""
67
- elif 1 <= choice <= len(tasks):
68
- return tasks[choice - 1]
81
+ elif 1 <= choice <= len(task_names):
82
+ selected_name = task_names[choice - 1]
83
+ return tasks[selected_name] # Return the task description
69
84
  else:
70
85
  PrettyOutput.print("Invalid choice. Please try again.", OutputType.ERROR)
71
86
  except ValueError:
@@ -73,13 +88,16 @@ def select_task(tasks: list) -> str:
73
88
 
74
89
  def main():
75
90
  """Main entry point for Jarvis."""
91
+
92
+ load_env_from_file()
93
+
76
94
  parser = argparse.ArgumentParser(description="Jarvis AI Assistant")
77
95
 
78
96
  # 添加平台选择参数
79
97
  parser.add_argument(
80
98
  "--platform",
81
99
  choices=list(SUPPORTED_PLATFORMS.keys()),
82
- default="ollama",
100
+ default=os.getenv("JARVIS_PLATFORM") or "ddgs",
83
101
  help="选择运行平台 (默认: ollama)"
84
102
  )
85
103
 
@@ -92,11 +110,13 @@ def main():
92
110
  # 添加API基础URL参数
93
111
  parser.add_argument(
94
112
  "--api-base",
95
- default="http://localhost:11434",
113
+ default=os.getenv("JARVIS_OLLAMA_API_BASE") or "http://localhost:11434",
96
114
  help="Ollama API基础URL (仅用于Ollama平台, 默认: http://localhost:11434)"
97
115
  )
98
116
 
99
117
  args = parser.parse_args()
118
+
119
+ args.model = args.model or os.getenv("JARVIS_MODEL")
100
120
 
101
121
  # 验证并设置默认模型
102
122
  if args.model:
@@ -119,11 +139,14 @@ def main():
119
139
  api_base=args.api_base
120
140
  )
121
141
  platform_name = f"Ollama ({args.model})"
122
- else: # ddgs
142
+ elif args.platform == "ddgs": # ddgs
123
143
  model = DDGSModel(model_name=args.model)
124
144
  platform_name = f"DuckDuckGo Search ({args.model})"
145
+ elif args.platform == "zte": # zte
146
+ model = create_zte_llm(model_name=args.model)
147
+ platform_name = f"ZTE ({args.model})"
125
148
 
126
- tool_registry = ToolRegistry()
149
+ tool_registry = ToolRegistry(model)
127
150
  agent = Agent(model, tool_registry)
128
151
 
129
152
  # 欢迎信息
@@ -142,12 +165,9 @@ def main():
142
165
  while True:
143
166
  try:
144
167
  user_input = get_multiline_input("请输入您的任务(输入空行退出):")
145
- if not user_input:
168
+ if not user_input or user_input == "__interrupt__":
146
169
  break
147
170
  agent.run(user_input)
148
- except KeyboardInterrupt:
149
- print("\n正在退出...")
150
- break
151
171
  except Exception as e:
152
172
  PrettyOutput.print(f"错误: {str(e)}", OutputType.ERROR)
153
173
 
@@ -0,0 +1,130 @@
1
+ import re
2
+ import time
3
+ from typing import Dict, List, Optional
4
+ from duckduckgo_search import DDGS
5
+ import ollama
6
+ from abc import ABC, abstractmethod
7
+ import yaml
8
+
9
+ from .utils import OutputType, PrettyOutput
10
+
11
class BaseModel(ABC):
    """Abstract base class for large language models."""

    @abstractmethod
    def chat(self, messages: List[Dict], tools: Optional[List[Dict]] = None) -> Dict:
        """Run one chat completion.

        Args:
            messages: Chat history as role/content dicts.
            tools: Optional tool descriptions to advertise to the model.

        Returns:
            A dict of shape ``{"message": {"content": str, "tool_calls": list}}``.
        """
        pass

    @staticmethod
    def extract_tool_calls(content: str) -> List[Dict]:
        """Extract tool calls from content; only the first valid call is returned.

        A tool call is a YAML mapping with ``name`` and ``arguments`` keys
        delimited by <START_TOOL_CALL> / <END_TOOL_CALL> marker lines.
        """
        lines = content.split('\n')
        tool_call_lines = []
        in_tool_call = False

        # Process line by line, collecting text between the marker lines.
        for line in lines:
            if not line:
                continue

            # Strip for marker comparison so stray surrounding whitespace
            # (the original required exact matches) does not hide a call.
            marker = line.strip()
            if marker == '<START_TOOL_CALL>':
                tool_call_lines = []
                in_tool_call = True
                continue
            elif marker == '<END_TOOL_CALL>':
                if in_tool_call and tool_call_lines:
                    try:
                        # Parse the collected tool-call body as YAML.
                        tool_call_text = '\n'.join(tool_call_lines)
                        tool_call_data = yaml.safe_load(tool_call_text)

                        # The payload must be a mapping with both required
                        # keys; scalars/lists are rejected explicitly instead
                        # of relying on the broad except below.
                        if (isinstance(tool_call_data, dict)
                                and "name" in tool_call_data
                                and "arguments" in tool_call_data):
                            # Only the first valid tool call is returned.
                            return [{
                                "function": {
                                    "name": tool_call_data["name"],
                                    "arguments": tool_call_data["arguments"]
                                }
                            }]
                    except yaml.YAMLError:
                        pass  # skip invalid YAML
                    except Exception:
                        pass  # skip any other malformed payload
                in_tool_call = False
            elif in_tool_call:
                # Keep the line verbatim: YAML indentation matters.
                tool_call_lines.append(line)

        return []  # no valid tool call found
61
+
62
+
63
class DDGSModel(BaseModel):
    """Chat model backed by DuckDuckGo's chat endpoint.

    Supported model names:
      [1] gpt-4o-mini
      [2] claude-3-haiku
      [3] llama-3.1-70b
      [4] mixtral-8x7b
    """

    def __init__(self, model_name: str = "gpt-4o-mini"):
        """Remember which DDG chat model to talk to."""
        self.model_name = model_name

    def __make_prompt(self, messages: List[Dict], tools: Optional[List[Dict]] = None) -> str:
        # Flatten the chat history into "[role]: content" lines; the DDG
        # endpoint takes a single prompt string rather than a message list.
        parts = [f"[{message['role']}]: {message['content']}\n" for message in messages]
        return "".join(parts)

    def chat(self, messages: List[Dict], tools: Optional[List[Dict]] = None) -> Dict:
        """Send the flattened conversation to DDG chat and parse tool calls back out."""
        ddgs = DDGS()
        prompt = self.__make_prompt(messages, tools)
        content = ddgs.chat(prompt)
        PrettyOutput.print_stream(content, OutputType.SYSTEM)
        reply = {
            "content": content,
            "tool_calls": BaseModel.extract_tool_calls(content),
        }
        return {"message": reply}
91
+
92
+
93
class OllamaModel(BaseModel):
    """Ollama model implementation (streaming chat against a local Ollama server)."""

    def __init__(self, model_name: str = "qwen2.5:14b", api_base: str = "http://localhost:11434"):
        """Create a client bound to the given Ollama server and model name."""
        self.model_name = model_name
        self.api_base = api_base
        self.client = ollama.Client(host=api_base)

    def chat(self, messages: List[Dict], tools: Optional[List[Dict]] = None) -> Dict:
        """Call the Ollama API and return the assembled response.

        ``tools`` is accepted for interface compatibility but deliberately
        unused here: tool descriptions live in the system prompt and tool
        calls are parsed back out of the plain-text reply.

        Raises:
            Exception: wraps any API failure, chaining the original cause.
        """
        try:
            # Stream so the reply can be echoed to the console as it arrives.
            stream = self.client.chat(
                model=self.model_name,
                messages=messages,
                stream=True
            )

            # Collect the full response chunk by chunk.
            content_parts = []
            for chunk in stream:
                if chunk.message.content:
                    content_parts.append(chunk.message.content)
                    # Echo each chunk immediately for live feedback.
                    PrettyOutput.print_stream(chunk.message.content, OutputType.SYSTEM)

            # Merge into the complete content and extract any tool call.
            content = "".join(content_parts)
            tool_calls = BaseModel.extract_tool_calls(content)

            return {
                "message": {
                    "content": content,
                    "tool_calls": tool_calls
                }
            }
        except Exception as e:
            # Chain the cause so the underlying network/API error stays visible
            # (the original re-raise discarded it).
            raise Exception(f"Ollama API调用失败: {str(e)}") from e
@@ -1,22 +1,16 @@
1
1
  from .base import Tool, ToolRegistry
2
- from .python_script import PythonScript
3
2
  from .file_ops import FileOperationTool
4
3
  from .search import SearchTool
5
4
  from .shell import ShellTool
6
- from .user_interaction import UserInteractionTool
7
- from .user_confirmation import UserConfirmationTool
8
- from .rag import RAGTool
9
5
  from .webpage import WebpageTool
6
+ from .user_input import UserInputTool
10
7
 
11
8
  __all__ = [
12
9
  'Tool',
13
10
  'ToolRegistry',
14
- 'PythonScript',
15
11
  'FileOperationTool',
16
12
  'SearchTool',
17
13
  'ShellTool',
18
- 'UserInteractionTool',
19
- 'UserConfirmationTool',
20
- 'RAGTool',
21
14
  'WebpageTool',
22
- ]
15
+ 'UserInputTool',
16
+ ]