jarvis-ai-assistant 0.1.131__py3-none-any.whl → 0.1.132__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +48 -29
- jarvis/jarvis_agent/patch.py +61 -43
- jarvis/jarvis_agent/shell_input_handler.py +1 -1
- jarvis/jarvis_code_agent/code_agent.py +87 -86
- jarvis/jarvis_dev/main.py +335 -626
- jarvis/jarvis_git_squash/main.py +10 -31
- jarvis/jarvis_multi_agent/__init__.py +19 -28
- jarvis/jarvis_platform/ai8.py +7 -32
- jarvis/jarvis_platform/base.py +2 -7
- jarvis/jarvis_platform/kimi.py +3 -144
- jarvis/jarvis_platform/ollama.py +54 -68
- jarvis/jarvis_platform/openai.py +0 -4
- jarvis/jarvis_platform/oyi.py +0 -75
- jarvis/jarvis_platform/yuanbao.py +264 -0
- jarvis/jarvis_rag/file_processors.py +138 -0
- jarvis/jarvis_rag/main.py +1305 -425
- jarvis/jarvis_tools/ask_codebase.py +205 -39
- jarvis/jarvis_tools/code_review.py +125 -99
- jarvis/jarvis_tools/execute_python_script.py +58 -0
- jarvis/jarvis_tools/execute_shell.py +13 -26
- jarvis/jarvis_tools/execute_shell_script.py +1 -1
- jarvis/jarvis_tools/file_analyzer.py +271 -0
- jarvis/jarvis_tools/file_operation.py +1 -1
- jarvis/jarvis_tools/find_caller.py +213 -0
- jarvis/jarvis_tools/find_symbol.py +211 -0
- jarvis/jarvis_tools/function_analyzer.py +248 -0
- jarvis/jarvis_tools/git_commiter.py +4 -4
- jarvis/jarvis_tools/methodology.py +89 -48
- jarvis/jarvis_tools/project_analyzer.py +220 -0
- jarvis/jarvis_tools/read_code.py +23 -2
- jarvis/jarvis_tools/read_webpage.py +195 -81
- jarvis/jarvis_tools/registry.py +132 -11
- jarvis/jarvis_tools/search_web.py +55 -10
- jarvis/jarvis_tools/tool_generator.py +6 -8
- jarvis/jarvis_utils/__init__.py +1 -0
- jarvis/jarvis_utils/config.py +67 -3
- jarvis/jarvis_utils/embedding.py +344 -45
- jarvis/jarvis_utils/git_utils.py +9 -1
- jarvis/jarvis_utils/input.py +7 -6
- jarvis/jarvis_utils/methodology.py +379 -7
- jarvis/jarvis_utils/output.py +5 -3
- jarvis/jarvis_utils/utils.py +59 -7
- {jarvis_ai_assistant-0.1.131.dist-info → jarvis_ai_assistant-0.1.132.dist-info}/METADATA +3 -2
- jarvis_ai_assistant-0.1.132.dist-info/RECORD +82 -0
- {jarvis_ai_assistant-0.1.131.dist-info → jarvis_ai_assistant-0.1.132.dist-info}/entry_points.txt +2 -0
- jarvis/jarvis_codebase/__init__.py +0 -0
- jarvis/jarvis_codebase/main.py +0 -1011
- jarvis/jarvis_tools/treesitter_analyzer.py +0 -331
- jarvis/jarvis_treesitter/README.md +0 -104
- jarvis/jarvis_treesitter/__init__.py +0 -20
- jarvis/jarvis_treesitter/database.py +0 -258
- jarvis/jarvis_treesitter/example.py +0 -115
- jarvis/jarvis_treesitter/grammar_builder.py +0 -182
- jarvis/jarvis_treesitter/language.py +0 -117
- jarvis/jarvis_treesitter/symbol.py +0 -31
- jarvis/jarvis_treesitter/tools_usage.md +0 -121
- jarvis_ai_assistant-0.1.131.dist-info/RECORD +0 -85
- {jarvis_ai_assistant-0.1.131.dist-info → jarvis_ai_assistant-0.1.132.dist-info}/LICENSE +0 -0
- {jarvis_ai_assistant-0.1.131.dist-info → jarvis_ai_assistant-0.1.132.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.1.131.dist-info → jarvis_ai_assistant-0.1.132.dist-info}/top_level.txt +0 -0
jarvis/jarvis_git_squash/main.py
CHANGED

@@ -1,3 +1,4 @@
+import re
 import sys
 import argparse
 from typing import Dict, Any

@@ -26,39 +27,23 @@ class GitSquashTool:
         except Exception:
             return False

-    def execute(self, args: Dict)
+    def execute(self, args: Dict):
         """Execute the squash operation"""
         try:
             if not self._confirm_squash():
-
-
-                    "stdout": "Operation cancelled",
-                    "stderr": ""
-                }
+                PrettyOutput.print("操作已取消", OutputType.WARNING)
+                return

             if not self._reset_to_commit(args['commit_hash']):
-
-
-                    "stdout": "",
-                    "stderr": "Failed to reset to specified commit"
-                }
+                PrettyOutput.print("重置到指定提交失败", OutputType.WARNING)
+                return

             # Use existing GitCommitTool for new commit
             commit_tool = GitCommitTool()
-
-
-            return {
-                "success": result['success'],
-                "stdout": result['stdout'],
-                "stderr": result['stderr']
-            }
-
+            commit_tool.execute({"lang": args.get('lang', 'Chinese')})
         except Exception as e:
-
-
-                "stdout": "",
-                "stderr": f"Squash failed: {str(e)}"
-            }
+            PrettyOutput.print(f"压缩提交失败: {str(e)}", OutputType.WARNING)
+
 def main():
     init_env()
     parser = argparse.ArgumentParser(description='Git squash tool')

@@ -67,15 +52,9 @@ def main():
     args = parser.parse_args()

     tool = GitSquashTool()
-
+    tool.execute({
         'commit_hash': args.commit_hash,
         'lang': args.lang
     })
-
-    if not result['success']:
-        PrettyOutput.print(result['stderr'], OutputType.ERROR)
-        sys.exit(1)
-
-    PrettyOutput.print(result['stdout'], OutputType.SUCCESS)
 if __name__ == "__main__":
     sys.exit(main())
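With this change `GitSquashTool.execute` no longer returns a `{"success", "stdout", "stderr"}` result dict; it reports through `PrettyOutput` and returns `None`, and `main()` no longer inspects a result. A minimal sketch of the new calling convention (the commit hash is illustrative):

```python
# Sketch only: execute() now prints warnings/results via PrettyOutput instead of
# returning a result dict, so the caller just invokes it and checks nothing.
from jarvis.jarvis_git_squash.main import GitSquashTool

tool = GitSquashTool()
tool.execute({"commit_hash": "abc1234", "lang": "Chinese"})  # hypothetical commit hash
```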
jarvis/jarvis_multi_agent/__init__.py
CHANGED

@@ -8,7 +8,7 @@ from jarvis.jarvis_agent.output_handler import OutputHandler
 from jarvis.jarvis_tools.registry import ToolRegistry
 from jarvis.jarvis_utils.input import get_multiline_input
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput
-from jarvis.jarvis_utils.utils import init_env
+from jarvis.jarvis_utils.utils import ct, ot, init_env


 class MultiAgent(OutputHandler):

@@ -20,12 +20,7 @@ class MultiAgent(OutputHandler):

     def prompt(self) -> str:
         return f"""
-#
-
-## 身份与角色定位
-- **核心职责**:作为多智能体系统的协调者,通过结构化消息实现高效协作
-- **关键能力**:消息路由、任务分发、结果整合、流程协调
-- **工作范围**:在多个专业智能体之间建立有效沟通渠道
+# 多智能体消息发送

 ## 交互原则与策略
 ### 消息处理规范

@@ -36,7 +31,7 @@ class MultiAgent(OutputHandler):

 ### 消息格式标准
 ```
-
+{ot("SEND_MESSAGE")}
 to: 智能体名称  # 目标智能体名称
 content: |
 # 消息主题

@@ -52,31 +47,27 @@ content: |

 ## 期望结果
 [描述期望的输出格式和内容]
-
+
+## 下一步计划
+[描述下一步的计划和行动]
+{ct("SEND_MESSAGE")}
 ```

-
-### 任务分发流程
-1. **需求分析**:理解用户需求并确定最适合的智能体
-2. **任务分解**:将复杂任务分解为可管理的子任务
-3. **精准分发**:根据专长将任务分配给合适的智能体
-4. **结果整合**:收集各智能体的输出并整合为连贯结果
+或者:

-
-
-
-
-
+```
+{ot("SEND_MESSAGE")}
+to: 智能体名称  # 目标智能体名称
+content: |
+# 消息主题
+
+## 任务结果
+[任务完成结果,用于反馈]
+{ct("SEND_MESSAGE")}
+```

 ## 可用智能体资源
 {chr(10).join([f"- **{c['name']}**: {c.get('description', '')}" for c in self.agents_config])}
-
-## 最佳实践指南
-1. **任务明确化**:每个消息专注于单一、明确的任务
-2. **信息充分性**:提供足够信息让接收者能独立完成任务
-3. **反馈循环**:建立清晰的反馈机制,及时调整方向
-4. **知识共享**:确保关键信息在相关智能体间共享
-5. **协作效率**:避免不必要的消息传递,减少协调开销
 """

     def can_handle(self, response: str) -> bool:

@@ -102,7 +93,7 @@ content: |
         Args:
             content: The content containing send message
         """
-        data = re.findall(r'
+        data = re.findall(ot("SEND_MESSAGE")+r'\n(.*?)\n'+ct("SEND_MESSAGE"), content, re.DOTALL)
         ret = []
         for item in data:
             try:
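This release swaps the hard-coded message tags for the new `ot`/`ct` helpers imported from `jarvis.jarvis_utils.utils`. Their definitions are not part of this diff, so the sketch below assumes they simply emit an opening and a closing tag for a given name; under that assumption, the new `re.findall` call in `MultiAgent` extracts each `SEND_MESSAGE` block like this:

```python
import re

# Hypothetical stand-ins for jarvis.jarvis_utils.utils.ot/ct ("open tag"/"close tag");
# the real helpers may use different delimiters.
def ot(name: str) -> str:
    return f"<{name}>"

def ct(name: str) -> str:
    return f"</{name}>"

content = f"""{ot("SEND_MESSAGE")}
to: coder
content: |
  # 消息主题
{ct("SEND_MESSAGE")}"""

# Same extraction pattern as the updated MultiAgent handler: capture everything
# between the opening and closing SEND_MESSAGE tags.
blocks = re.findall(ot("SEND_MESSAGE") + r"\n(.*?)\n" + ct("SEND_MESSAGE"), content, re.DOTALL)
print(blocks)  # ['to: coder\ncontent: |\n  # 消息主题']
```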
jarvis/jarvis_platform/ai8.py
CHANGED

@@ -23,7 +23,6 @@ class AI8Model(BasePlatform):
         super().__init__()
         self.system_message = ""
         self.conversation = {}
-        self.files = []
         self.models = {}  # 存储模型信息

         self.token = os.getenv("AI8_API_KEY")

@@ -103,18 +102,6 @@ class AI8Model(BasePlatform):
         except Exception as e:
             PrettyOutput.print(f"创建会话失败: {str(e)}", OutputType.ERROR)
             return False
-
-    def upload_files(self, file_list: List[str]) -> List[Dict]:
-        for file_path in file_list:
-            name = os.path.basename(file_path)
-            with open(file_path, 'rb') as f:
-                file_data = f.read()
-                base64_data = base64.b64encode(file_data).decode('utf-8')
-                self.files.append({
-                    "name": name,
-                    "data": f"data:image/png;base64,{base64_data}"
-                })
-        return self.files

     def set_system_message(self, message: str):
         """Set system message"""

@@ -145,14 +132,6 @@ class AI8Model(BasePlatform):
             "files": []
         }

-        # 如果有文件需要发送
-        if self.files:
-            for file_data in self.files:
-                payload["files"].append({
-                    "name": file_data["name"],
-                    "data": file_data["data"]
-                })
-            self.files = []  # 清空已使用的文件

         response = requests.post(
             f"{self.BASE_URL}/api/chat/completions",

@@ -200,7 +179,6 @@ class AI8Model(BasePlatform):
     def reset(self):
         """Reset model state"""
         self.conversation = None
-        self.files = []  # 清空文件列表

     def delete_chat(self) -> bool:
         """Delete current chat session"""

@@ -284,13 +262,6 @@ class AI8Model(BasePlatform):
             # 添加标签
             model_str = f"{model['label']}"

-            # 添加标签和积分信息
-            attrs = []
-            if model['attr'].get('tag'):
-                attrs.append(model['attr']['tag'])
-            if model['attr'].get('integral'):
-                attrs.append(model['attr']['integral'])
-
             # 添加特性标记
             features = []
             if model['attr'].get('multimodal'):

@@ -299,12 +270,16 @@ class AI8Model(BasePlatform):
                 features.append("Plugin support")
             if model['attr'].get('onlyImg'):
                 features.append("Image support")
-            if
-
-
+            if model['attr'].get('tag'):
+                features.append(model['attr']['tag'])
+            if model['attr'].get('integral'):
+                features.append(model['attr']['integral'])
             # 添加备注
             if model['attr'].get('note'):
                 model_str += f" - {model['attr']['note']}"
+            if features:
+                model_str += f" [{'|'.join(features)}]"
+
             model['desc'] = model_str

         return list(self.models.keys())
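The model-listing rework folds the `tag` and `integral` attributes into the same `features` list as the capability flags and appends them to the description as one bracketed segment, instead of collecting them in a separate unused `attrs` list. A small sketch of the resulting string assembly (attribute values are invented for illustration):

```python
# Illustrative model entry; the field values are made up.
model = {
    "label": "ai8-chat",
    "attr": {"onlyImg": True, "tag": "Hot", "integral": "10积分", "note": "general chat"},
}

model_str = f"{model['label']}"

features = []
if model["attr"].get("onlyImg"):
    features.append("Image support")
if model["attr"].get("tag"):
    features.append(model["attr"]["tag"])
if model["attr"].get("integral"):
    features.append(model["attr"]["integral"])

if model["attr"].get("note"):
    model_str += f" - {model['attr']['note']}"
if features:
    model_str += f" [{'|'.join(features)}]"

print(model_str)  # ai8-chat - general chat [Image support|Hot|10积分]
```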
jarvis/jarvis_platform/base.py
CHANGED

@@ -2,7 +2,7 @@ from abc import ABC, abstractmethod
 import re
 from typing import Dict, List, Tuple
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput
-from jarvis.jarvis_utils.utils import get_context_token_count, while_success, while_true
+from jarvis.jarvis_utils.utils import ct, ot, get_context_token_count, while_success, while_true


 class BasePlatform(ABC):

@@ -53,16 +53,11 @@ class BasePlatform(ABC):
             )

             # Keep original think tag handling
-            response = re.sub(r'
+            response = re.sub(ot("think")+r'.*?'+ct("think"), '', response, flags=re.DOTALL)
             return response

         return while_true(lambda: while_success(lambda: _chat(), 5), 5)

-    @abstractmethod
-    def upload_files(self, file_list: List[str]) -> List[Dict]:
-        """Upload files"""
-        raise NotImplementedError("upload_files is not implemented")
-
     @abstractmethod
     def reset(self):
         """Reset model"""
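`BasePlatform` now builds its think-block filter from the same `ot`/`ct` helpers instead of a hard-coded regex. Under the same assumption as the earlier sketch (that the helpers emit `<think>` / `</think>`-style tags), the stripping behaves as follows:

```python
import re

# Same hypothetical ot/ct stand-ins as in the earlier sketch; the real helpers
# live in jarvis.jarvis_utils.utils and may use different delimiters.
def ot(name: str) -> str:
    return f"<{name}>"

def ct(name: str) -> str:
    return f"</{name}>"

response = "<think>model scratchpad, not shown to the user</think>Final answer."

# Mirrors the updated BasePlatform: drop the reasoning block before returning the reply.
cleaned = re.sub(ot("think") + r".*?" + ct("think"), "", response, flags=re.DOTALL)
print(cleaned)  # Final answer.
```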
jarvis/jarvis_platform/kimi.py
CHANGED

@@ -45,7 +45,6 @@ class KimiModel(BasePlatform):
             PrettyOutput.print("KIMI_API_KEY 未设置", OutputType.WARNING)
         self.auth_header = f"Bearer {self.api_key}"
         self.chat_id = ""
-        self.uploaded_files = []  # 存储已上传文件的信息
         self.first_chat = True  # 添加标记,用于判断是否是第一次对话
         self.system_message = ""

@@ -77,138 +76,6 @@ class KimiModel(BasePlatform):
             PrettyOutput.print(f"错误:创建会话失败:{e}", OutputType.ERROR)
             return False

-    def _get_presigned_url(self, filename: str, action: str) -> Dict:
-        """Get presigned upload URL"""
-        url = "https://kimi.moonshot.cn/api/pre-sign-url"
-
-
-
-        payload = json.dumps({
-            "action": action,
-            "name": os.path.basename(filename)
-        }, ensure_ascii=False)
-
-        headers = {
-            'Authorization': self.auth_header,
-            'Content-Type': 'application/json'
-        }
-
-        response = while_success(lambda: requests.post(url, headers=headers, data=payload), sleep_time=5)
-        return response.json()
-
-    def _upload_file(self, file_path: str, presigned_url: str) -> bool:
-        """Upload file to presigned URL"""
-        try:
-            with open(file_path, 'rb') as f:
-                content = f.read()
-                response = while_success(lambda: requests.put(presigned_url, data=content), sleep_time=5)
-                return response.status_code == 200
-        except Exception as e:
-            PrettyOutput.print(f"错误:上传文件失败:{e}", OutputType.ERROR)
-            return False
-
-    def _get_file_info(self, file_data: Dict, name: str, file_type: str) -> Dict:
-        """Get file information"""
-        url = "https://kimi.moonshot.cn/api/file"
-        payload = json.dumps({
-            "type": file_type,
-            "name": name,
-            "object_name": file_data["object_name"],
-            "chat_id": self.chat_id,
-            "file_id": file_data.get("file_id", "")
-        }, ensure_ascii=False)
-
-        headers = {
-            'Authorization': self.auth_header,
-            'Content-Type': 'application/json'
-        }
-
-        response = while_success(lambda: requests.post(url, headers=headers, data=payload), sleep_time=5)
-        return response.json()
-
-    def _wait_for_parse(self, file_id: str) -> bool:
-        """Wait for file parsing to complete"""
-        url = "https://kimi.moonshot.cn/api/file/parse_process"
-        headers = {
-            'Authorization': self.auth_header,
-            'Content-Type': 'application/json'
-        }
-
-        max_retries = 30
-        retry_count = 0
-
-        while retry_count < max_retries:
-            payload = json.dumps({"ids": [file_id]}, ensure_ascii=False)
-            response = while_success(lambda: requests.post(url, headers=headers, data=payload, stream=True), sleep_time=5)
-
-            for line in response.iter_lines():
-                if not line:
-                    continue
-
-                line = line.decode('utf-8')
-                if not line.startswith("data: "):
-                    continue
-
-                try:
-                    data = json.loads(line[6:])
-                    if data.get("event") == "resp":
-                        status = data.get("file_info", {}).get("status")
-                        if status == "parsed":
-                            return True
-                        elif status == "failed":
-                            return False
-                except json.JSONDecodeError:
-                    continue
-
-            retry_count += 1
-            time.sleep(1)
-
-        return False
-    def upload_files(self, file_list: List[str]) -> List[Dict]:
-        """Upload file list and return file information"""
-        if not file_list:
-            return []
-
-
-        if not self.chat_id:
-            if not self._create_chat():
-                raise Exception("Failed to create chat session")
-
-        uploaded_files = []
-        for index, file_path in enumerate(file_list, 1):
-            try:
-                PrettyOutput.print(f"处理文件 [{index}/{len(file_list)}]: {file_path}", OutputType.PROGRESS)
-
-                mime_type, _ = mimetypes.guess_type(file_path)
-                action = "image" if mime_type and mime_type.startswith('image/') else "file"
-
-                # 获取预签名URL
-                presigned_data = self._get_presigned_url(file_path, action)
-
-                # 上传文件
-                if self._upload_file(file_path, presigned_data["url"]):
-                    # 获取文件信息
-                    file_info = self._get_file_info(presigned_data, os.path.basename(file_path), action)
-                    file_info = self._get_file_info(presigned_data, os.path.basename(file_path), action)
-                    # 等待文件解析
-
-                    # 只有文件需要解析
-                    if action == "file":
-                        if self._wait_for_parse(file_info["id"]):
-                            uploaded_files.append(file_info)
-                        else:
-                            PrettyOutput.print(f"✗ 文件解析失败: {file_path}", OutputType.WARNING)
-                    else:
-                        uploaded_files.append(file_info)
-                else:
-                    PrettyOutput.print(f"错误:文件上传失败: {file_path}", OutputType.WARNING)
-
-            except Exception as e:
-                PrettyOutput.print(f"✗ 处理文件出错 {file_path}: {str(e)}", OutputType.ERROR)
-                continue
-
-        self.uploaded_files = uploaded_files
-        return uploaded_files

     def chat(self, message: str) -> str:
         """Send message and get response"""

@@ -218,15 +85,7 @@ class KimiModel(BasePlatform):

         url = f"https://kimi.moonshot.cn/api/chat/{self.chat_id}/completion/stream"

-
-        refs = []
-        refs_file = []
-        if self.first_chat:
-            if self.uploaded_files:
-                refs = [f["id"] for f in self.uploaded_files]
-                refs_file = self.uploaded_files
-            message = self.system_message + "\n" + message
-            self.first_chat = False
+

         payload = {
             "messages": [{"role": "user", "content": message}],

@@ -235,8 +94,8 @@ class KimiModel(BasePlatform):
             "kimiplus_id": "kimi",
             "use_research": False,
             "use_math": False,
-            "refs":
-            "refs_file":
+            "refs": [],
+            "refs_file": []
         }

         headers = {
jarvis/jarvis_platform/ollama.py
CHANGED

@@ -1,12 +1,12 @@
-import requests
 from typing import List, Dict, Tuple
 from jarvis.jarvis_platform.base import BasePlatform
 import os
-import json

-from jarvis.jarvis_utils.input import get_single_line_input
 from jarvis.jarvis_utils.output import OutputType, PrettyOutput

+
+import ollama
+
 class OllamaPlatform(BasePlatform):
     """Ollama platform implementation"""

@@ -20,11 +20,13 @@ class OllamaPlatform(BasePlatform):
         self.api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434")
         self.model_name = os.getenv("JARVIS_MODEL") or "deepseek-r1:1.5b"

+        # Setup client based on availability
+        self.client = None
+        self.client = ollama.Client(host=self.api_base)
+
         # Check if Ollama service is available
         try:
-
-            response.raise_for_status()
-            available_models = [model["name"] for model in response.json().get("models", [])]
+            available_models = self._get_available_models()

             if not available_models:
                 message = (

@@ -36,9 +38,9 @@ class OllamaPlatform(BasePlatform):
                 PrettyOutput.print(message, OutputType.INFO)
                 PrettyOutput.print("Ollama 没有可用的模型", OutputType.WARNING)

-        except
+        except Exception as e:
             message = (
-                "Ollama
+                f"Ollama 服务未启动或无法连接: {str(e)}\n"
                 "请确保您已:\n"
                 "1. 安装 Ollama: https://ollama.ai\n"
                 "2. 启动 Ollama 服务\n"

@@ -50,11 +52,19 @@ class OllamaPlatform(BasePlatform):
         self.messages = []
         self.system_message = ""

+    def _get_available_models(self) -> List[str]:
+        """Get list of available models using appropriate method"""
+        models_response = self.client.list()  # type: ignore
+        return [model["model"] for model in models_response.get("models", [])]
+
     def get_model_list(self) -> List[Tuple[str, str]]:
         """Get model list"""
-
-
-
+        try:
+            models = self._get_available_models()
+            return [(model, "") for model in models]
+        except Exception as e:
+            PrettyOutput.print(f"获取模型列表失败: {str(e)}", OutputType.ERROR)
+            return []

     def set_model_name(self, model_name: str):
         """Set model name"""

@@ -69,54 +79,44 @@ class OllamaPlatform(BasePlatform):
             messages.append({"role": "system", "content": self.system_message})
             messages.extend(self.messages)
             messages.append({"role": "user", "content": message})
-
-
-            data = {
-                "model": self.model_name,
-                "messages": messages,
-                "stream": True  # 启用流式输出
-            }
-
-            # 发送请求
-            response = requests.post(
-                f"{self.api_base}/api/chat",
-                json=data,
-                stream=True
-            )
-            response.raise_for_status()
-
-            # 处理流式响应
-            full_response = ""
-            for line in response.iter_lines():
-                if line:
-                    chunk = line.decode()
-                    try:
-                        result = json.loads(chunk)
-                        if "message" in result and "content" in result["message"]:
-                            text = result["message"]["content"]
-                            if not self.suppress_output:
-                                PrettyOutput.print_stream(text)
-                            full_response += text
-                    except json.JSONDecodeError:
-                        continue
-
-            if not self.suppress_output:
-                PrettyOutput.print_stream_end()
-
-            # 更新消息历史
-            self.messages.append({"role": "user", "content": message})
-            self.messages.append({"role": "assistant", "content": full_response})
-
-            return full_response
+
+            return self._chat_with_package(messages)

         except Exception as e:
             PrettyOutput.print(f"对话失败: {str(e)}", OutputType.ERROR)
             raise Exception(f"Chat failed: {str(e)}")

-    def
-    """
-
-
+    def _chat_with_package(self, messages: List[Dict]) -> str:
+        """Chat using the ollama package"""
+        # The client should not be None here due to the check in the chat method
+        if not self.client:
+            raise ValueError("Ollama client is not initialized")
+
+        # Use ollama-python's streaming API
+        stream = self.client.chat(
+            model=self.model_name,
+            messages=messages,
+            stream=True
+        )
+
+        # Process the streaming response
+        full_response = ""
+        for chunk in stream:
+            if "message" in chunk and "content" in chunk["message"]:
+                text = chunk["message"]["content"]
+                if not self.suppress_output:
+                    PrettyOutput.print_stream(text)
+                full_response += text
+
+        if not self.suppress_output:
+            PrettyOutput.print_stream_end()
+
+        # Update message history
+        self.messages.append({"role": "user", "content": messages[-1]["content"]})
+        self.messages.append({"role": "assistant", "content": full_response})
+
+        return full_response
+

     def reset(self):
         """Reset model state"""

@@ -137,17 +137,3 @@ class OllamaPlatform(BasePlatform):
         """Set system message"""
         self.system_message = message
         self.reset()  # 重置会话以应用新的系统消息
-
-
-if __name__ == "__main__":
-    try:
-        ollama = OllamaPlatform()
-        while True:
-            try:
-                message = get_single_line_input("输入问题 (Ctrl+C 退出)")
-                ollama.chat_until_success(message)
-            except KeyboardInterrupt:
-                print("再见!")
-                break
-            except Exception as e:
-                PrettyOutput.print(f"程序异常退出: {str(e)}", OutputType.ERROR)
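The rewritten `OllamaPlatform` drops the hand-rolled `requests`/JSON streaming loop in favor of the `ollama` Python client, which the new `_chat_with_package` helper consumes. A minimal standalone sketch of that flow, assuming a local Ollama server on the default port with the model already pulled:

```python
# Sketch of the ollama-python streaming flow the new platform relies on.
# Assumes an Ollama server at localhost:11434 with "deepseek-r1:1.5b" pulled.
import ollama

client = ollama.Client(host="http://localhost:11434")
stream = client.chat(
    model="deepseek-r1:1.5b",
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
)

full_response = ""
for chunk in stream:
    # Each chunk carries a partial assistant message, accessed the same way
    # as in _chat_with_package above.
    text = chunk["message"]["content"]
    print(text, end="", flush=True)
    full_response += text
print()
```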
jarvis/jarvis_platform/openai.py
CHANGED

@@ -7,10 +7,6 @@ from jarvis.jarvis_utils.output import OutputType, PrettyOutput
 class OpenAIModel(BasePlatform):
     platform_name = "openai"

-    def upload_files(self, file_list: List[str]):
-        """Upload files"""
-        PrettyOutput.print("OpenAI does not support file upload", OutputType.WARNING)
-
     def __init__(self):
         """
         Initialize OpenAI model