@becrafter/prompt-manager 0.0.18 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/IFLOW.md +175 -0
- package/README.md +145 -234
- package/app/desktop/assets/app.1.png +0 -0
- package/app/desktop/assets/app.png +0 -0
- package/app/desktop/assets/icons/icon.icns +0 -0
- package/app/desktop/assets/icons/icon.ico +0 -0
- package/app/desktop/assets/icons/icon.png +0 -0
- package/app/desktop/assets/icons/tray.png +0 -0
- package/app/desktop/assets/templates/about.html +147 -0
- package/app/desktop/assets/tray.png +0 -0
- package/app/desktop/main.js +187 -732
- package/app/desktop/package-lock.json +723 -522
- package/app/desktop/package.json +54 -25
- package/app/desktop/preload.js +7 -0
- package/app/desktop/src/core/error-handler.js +108 -0
- package/app/desktop/src/core/event-emitter.js +84 -0
- package/app/desktop/src/core/logger.js +108 -0
- package/app/desktop/src/core/state-manager.js +125 -0
- package/app/desktop/src/services/module-loader.js +214 -0
- package/app/desktop/src/services/runtime-manager.js +301 -0
- package/app/desktop/src/services/service-manager.js +169 -0
- package/app/desktop/src/services/update-manager.js +268 -0
- package/app/desktop/src/ui/about-dialog-manager.js +208 -0
- package/app/desktop/src/ui/admin-window-manager.js +757 -0
- package/app/desktop/src/ui/splash-manager.js +253 -0
- package/app/desktop/src/ui/tray-manager.js +186 -0
- package/app/desktop/src/utils/icon-manager.js +133 -0
- package/app/desktop/src/utils/path-utils.js +58 -0
- package/app/desktop/src/utils/resource-paths.js +49 -0
- package/app/desktop/src/utils/resource-sync.js +260 -0
- package/app/desktop/src/utils/runtime-sync.js +241 -0
- package/app/desktop/src/utils/template-renderer.js +284 -0
- package/app/desktop/src/utils/version-utils.js +59 -0
- package/examples/prompts/engineer/engineer-professional.yaml +92 -0
- package/examples/prompts/engineer/laowang-engineer.yaml +132 -0
- package/examples/prompts/engineer/nekomata-engineer.yaml +123 -0
- package/examples/prompts/engineer/ojousama-engineer.yaml +124 -0
- package/examples/prompts/recommend/human_3-0_growth_diagnostic_coach_prompt.yaml +105 -0
- package/examples/prompts/workflow/sixstep-workflow.yaml +192 -0
- package/package.json +18 -9
- package/packages/admin-ui/.babelrc +3 -0
- package/packages/admin-ui/admin.html +237 -4784
- package/packages/admin-ui/css/main.css +2592 -0
- package/packages/admin-ui/css/recommended-prompts.css +610 -0
- package/packages/admin-ui/package-lock.json +6981 -0
- package/packages/admin-ui/package.json +36 -0
- package/packages/admin-ui/src/codemirror.js +53 -0
- package/packages/admin-ui/src/index.js +3188 -0
- package/packages/admin-ui/webpack.config.js +76 -0
- package/packages/resources/tools/chrome-devtools/README.md +310 -0
- package/packages/resources/tools/chrome-devtools/chrome-devtools.tool.js +1703 -0
- package/packages/resources/tools/file-reader/README.md +289 -0
- package/packages/resources/tools/file-reader/file-reader.tool.js +1545 -0
- package/packages/resources/tools/filesystem/README.md +359 -0
- package/packages/resources/tools/filesystem/filesystem.tool.js +538 -0
- package/packages/resources/tools/ollama-remote/README.md +192 -0
- package/packages/resources/tools/ollama-remote/ollama-remote.tool.js +421 -0
- package/packages/resources/tools/pdf-reader/README.md +236 -0
- package/packages/resources/tools/pdf-reader/pdf-reader.tool.js +565 -0
- package/packages/resources/tools/playwright/README.md +306 -0
- package/packages/resources/tools/playwright/playwright.tool.js +1186 -0
- package/packages/resources/tools/todolist/README.md +394 -0
- package/packages/resources/tools/todolist/todolist.tool.js +1312 -0
- package/packages/server/README.md +142 -0
- package/packages/server/api/admin.routes.js +42 -11
- package/packages/server/api/surge.routes.js +43 -0
- package/packages/server/app.js +119 -14
- package/packages/server/index.js +39 -0
- package/packages/server/mcp/mcp.server.js +346 -28
- package/packages/server/mcp/{mcp.handler.js → prompt.handler.js} +108 -9
- package/packages/server/mcp/sequential-thinking.handler.js +318 -0
- package/packages/server/mcp/think-plan.handler.js +274 -0
- package/packages/server/middlewares/auth.middleware.js +6 -0
- package/packages/server/package.json +51 -0
- package/packages/server/server.js +37 -1
- package/packages/server/toolm/index.js +9 -0
- package/packages/server/toolm/package-installer.service.js +267 -0
- package/packages/server/toolm/test-tools.js +264 -0
- package/packages/server/toolm/tool-context.service.js +334 -0
- package/packages/server/toolm/tool-dependency.service.js +168 -0
- package/packages/server/toolm/tool-description-generator-optimized.service.js +375 -0
- package/packages/server/toolm/tool-description-generator.service.js +312 -0
- package/packages/server/toolm/tool-environment.service.js +200 -0
- package/packages/server/toolm/tool-execution.service.js +277 -0
- package/packages/server/toolm/tool-loader.service.js +219 -0
- package/packages/server/toolm/tool-logger.service.js +223 -0
- package/packages/server/toolm/tool-manager.handler.js +65 -0
- package/packages/server/toolm/tool-manual-generator.service.js +389 -0
- package/packages/server/toolm/tool-mode-handlers.service.js +224 -0
- package/packages/server/toolm/tool-storage.service.js +111 -0
- package/packages/server/toolm/tool-sync.service.js +138 -0
- package/packages/server/toolm/tool-utils.js +20 -0
- package/packages/server/toolm/tool-yaml-parser.service.js +81 -0
- package/packages/server/toolm/validate-system.js +421 -0
- package/packages/server/utils/config.js +49 -5
- package/packages/server/utils/util.js +65 -10
- package/scripts/build-icons.js +135 -0
- package/scripts/build.sh +57 -0
- package/scripts/surge/CNAME +1 -0
- package/scripts/surge/README.md +47 -0
- package/scripts/surge/package-lock.json +34 -0
- package/scripts/surge/package.json +20 -0
- package/scripts/surge/sync-to-surge.js +151 -0
- package/packages/admin-ui/js/closebrackets.min.js +0 -8
- package/packages/admin-ui/js/codemirror.min.js +0 -8
- package/packages/admin-ui/js/js-yaml.min.js +0 -2
- package/packages/admin-ui/js/markdown.min.js +0 -8
- /package/app/desktop/assets/{icon.png → tray.1.png} +0 -0
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
# Ollama Remote Tool
|
|
2
|
+
|
|
3
|
+
远程 Ollama 服务器交互工具,支持列出模型和发送对话请求。
|
|
4
|
+
|
|
5
|
+
## 功能特性
|
|
6
|
+
|
|
7
|
+
1. **列出模型** (`list_models`)
|
|
8
|
+
- 列出远程 Ollama 服务器上所有可用的模型
|
|
9
|
+
- 支持过滤只显示云端模型
|
|
10
|
+
- 显示模型大小、更新时间等信息
|
|
11
|
+
|
|
12
|
+
2. **对话功能** (`chat`)
|
|
13
|
+
- 向远程 Ollama 服务器发送对话请求
|
|
14
|
+
- 支持自定义系统提示词
|
|
15
|
+
- 支持调整模型温度参数
|
|
16
|
+
- 返回完整的对话响应
|
|
17
|
+
|
|
18
|
+
## 环境变量配置
|
|
19
|
+
|
|
20
|
+
工具需要配置以下环境变量:
|
|
21
|
+
|
|
22
|
+
- `OLLAMA_BASE_URL` (必需): Ollama 服务器基础 URL
|
|
23
|
+
- 默认值: `http://localhost:11434`
|
|
24
|
+
- 示例: `http://192.168.1.100:11434`
|
|
25
|
+
|
|
26
|
+
- `OLLAMA_API_KEY` (可选): API 密钥,用于 Bearer Token 认证
|
|
27
|
+
- 默认值: 空字符串
|
|
28
|
+
- 如果配置,会通过 `Authorization: Bearer {API_KEY}` 方式传递
|
|
29
|
+
|
|
30
|
+
## 使用方法
|
|
31
|
+
|
|
32
|
+
### 1. 配置环境变量
|
|
33
|
+
|
|
34
|
+
```yaml
|
|
35
|
+
tool: tool://ollama-remote
|
|
36
|
+
mode: configure
|
|
37
|
+
parameters:
|
|
38
|
+
OLLAMA_BASE_URL: "http://localhost:11434"
|
|
39
|
+
OLLAMA_API_KEY: "your-api-key-here" # 可选
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
### 2. 列出可用模型
|
|
43
|
+
|
|
44
|
+
```yaml
|
|
45
|
+
tool: tool://ollama-remote
|
|
46
|
+
mode: execute
|
|
47
|
+
parameters:
|
|
48
|
+
method: list_models
|
|
49
|
+
only_remote: false # 可选,只显示云端模型
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### 3. 与模型对话
|
|
53
|
+
|
|
54
|
+
```yaml
|
|
55
|
+
tool: tool://ollama-remote
|
|
56
|
+
mode: execute
|
|
57
|
+
parameters:
|
|
58
|
+
method: chat
|
|
59
|
+
model: "llama3"
|
|
60
|
+
message: "你好,请介绍一下你自己"
|
|
61
|
+
system_prompt: "你是一个有用的AI助手" # 可选
|
|
62
|
+
temperature: 0.7 # 可选,0-1之间
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
## 参数说明
|
|
66
|
+
|
|
67
|
+
### list_models 方法
|
|
68
|
+
|
|
69
|
+
- `method` (必需): 必须为 `"list_models"`
|
|
70
|
+
- `only_remote` (可选): 布尔值,是否只显示云端模型,默认 `false`
|
|
71
|
+
|
|
72
|
+
### chat 方法
|
|
73
|
+
|
|
74
|
+
- `method` (必需): 必须为 `"chat"`
|
|
75
|
+
- `model` (必需): 模型名称,例如 `"llama3"`, `"deepseek-coder"`
|
|
76
|
+
- `message` (必需): 发送给模型的提示词或问题
|
|
77
|
+
- `system_prompt` (可选): 系统级指令
|
|
78
|
+
- `temperature` (可选): 模型温度,0-1之间,默认 `0.7`
|
|
79
|
+
|
|
80
|
+
## 返回格式
|
|
81
|
+
|
|
82
|
+
### list_models 返回
|
|
83
|
+
|
|
84
|
+
```json
|
|
85
|
+
{
|
|
86
|
+
"success": true,
|
|
87
|
+
"total": 5,
|
|
88
|
+
"displayed": 5,
|
|
89
|
+
"only_remote": false,
|
|
90
|
+
"models": [
|
|
91
|
+
{
|
|
92
|
+
"index": 1,
|
|
93
|
+
"name": "llama3:latest",
|
|
94
|
+
"size": "4.7 GB",
|
|
95
|
+
"modifiedAt": "2024-01-01T00:00:00Z",
|
|
96
|
+
"type": "💾 本地"
|
|
97
|
+
}
|
|
98
|
+
]
|
|
99
|
+
}
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
### chat 返回
|
|
103
|
+
|
|
104
|
+
```json
|
|
105
|
+
{
|
|
106
|
+
"success": true,
|
|
107
|
+
"model": "llama3",
|
|
108
|
+
"reply": "你好!我是...",
|
|
109
|
+
"usage": {
|
|
110
|
+
"prompt_tokens": 10,
|
|
111
|
+
"completion_tokens": 20,
|
|
112
|
+
"total_tokens": 30
|
|
113
|
+
},
|
|
114
|
+
"finishReason": "stop"
|
|
115
|
+
}
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
## 错误处理
|
|
119
|
+
|
|
120
|
+
工具定义了以下业务错误:
|
|
121
|
+
|
|
122
|
+
- `CONNECTION_FAILED`: 连接远程服务器失败
|
|
123
|
+
- `API_ERROR`: Ollama API 返回错误
|
|
124
|
+
- `INVALID_RESPONSE`: 无效的 API 响应
|
|
125
|
+
- `MISSING_PARAMETER`: 缺少必需参数
|
|
126
|
+
- `NO_MODELS_AVAILABLE`: 没有可用的模型
|
|
127
|
+
|
|
128
|
+
## 注意事项
|
|
129
|
+
|
|
130
|
+
1. **服务器同步**: 新添加的工具需要重启服务器才能被加载
|
|
131
|
+
2. **网络连接**: 确保可以访问配置的 Ollama 服务器地址
|
|
132
|
+
3. **API 兼容性**: 工具使用 Ollama 的 `/api/tags` 和 `/v1/chat/completions` 端点
|
|
133
|
+
4. **流式响应**: 当前实现不支持流式响应,返回完整结果
|
|
134
|
+
5. **认证方式**: API Key 通过 Bearer Token 方式传递
|
|
135
|
+
|
|
136
|
+
## 测试步骤
|
|
137
|
+
|
|
138
|
+
1. **重启服务器**(如果服务器已在运行)
|
|
139
|
+
```bash
|
|
140
|
+
# 停止当前服务器,然后重新启动
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
2. **查看工具手册**
|
|
144
|
+
```yaml
|
|
145
|
+
tool: tool://ollama-remote
|
|
146
|
+
mode: manual
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
3. **配置环境变量**
|
|
150
|
+
```yaml
|
|
151
|
+
tool: tool://ollama-remote
|
|
152
|
+
mode: configure
|
|
153
|
+
parameters:
|
|
154
|
+
OLLAMA_BASE_URL: "http://localhost:11434"
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
4. **测试列出模型**
|
|
158
|
+
```yaml
|
|
159
|
+
tool: tool://ollama-remote
|
|
160
|
+
mode: execute
|
|
161
|
+
parameters:
|
|
162
|
+
method: list_models
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
5. **测试对话功能**(需要确保有可用的模型)
|
|
166
|
+
```yaml
|
|
167
|
+
tool: tool://ollama-remote
|
|
168
|
+
mode: execute
|
|
169
|
+
parameters:
|
|
170
|
+
method: chat
|
|
171
|
+
model: "llama3"
|
|
172
|
+
message: "Hello, world!"
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
## 开发说明
|
|
176
|
+
|
|
177
|
+
工具基于参考实现 [ollama-remote-mcp](https://github.com/yangweijie/ollama-remote-mcp) 开发,遵循 Prompt Manager 工具开发规范:
|
|
178
|
+
|
|
179
|
+
- 使用 ES6 模块格式 (`export default`)
|
|
180
|
+
- 实现必需方法 `execute()`
|
|
181
|
+
- 实现推荐方法:`getDependencies()`, `getMetadata()`, `getSchema()`, `getBusinessErrors()`
|
|
182
|
+
- 完整的错误处理和日志记录
|
|
183
|
+
- 符合工具开发指南的所有要求
|
|
184
|
+
|
|
185
|
+
## 版本历史
|
|
186
|
+
|
|
187
|
+
- **1.0.0** (2025-12-01): 初始版本
|
|
188
|
+
- 实现列出模型功能
|
|
189
|
+
- 实现对话功能
|
|
190
|
+
- 支持环境变量配置
|
|
191
|
+
- 完整的错误处理
|
|
192
|
+
|
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
/**
 * Ollama Remote Tool - interacts with a remote Ollama server.
 *
 * Capabilities:
 * - List all models available on the remote Ollama server
 * - Send a chat request to the remote Ollama server
 *
 * Note: this tool runs inside an isolated sandbox environment.
 * Configuration comes from the OLLAMA_BASE_URL environment variable and
 * an optional OLLAMA_API_KEY.
 */

const ollamaRemoteTool = {
  /**
   * Declare tool dependencies.
   * Only Node.js built-ins and the global fetch API are used, so the
   * dependency map is empty.
   * @returns {object} empty dependency map
   */
  getDependencies() {
    return {
      // Uses Node.js built-ins and the global fetch API - no extra dependencies.
    };
  },

  /**
   * Tool metadata consumed by the tool manager (id, tags, usage scenarios,
   * known limitations). User-facing strings are intentionally in Chinese.
   * @returns {object} metadata record
   */
  getMetadata() {
    return {
      id: 'ollama-remote',
      name: 'Ollama Remote',
      description: '远程 Ollama 服务器交互工具,支持列出模型和发送对话请求',
      version: '1.0.0',
      category: 'ai',
      author: 'Prompt Manager',
      tags: ['ollama', 'ai', 'llm', 'remote', 'chat'],
      scenarios: [
        '列出远程 Ollama 服务器上的可用模型',
        '与远程 Ollama 模型进行对话',
        '使用自定义系统提示词进行对话',
        '调整模型温度参数'
      ],
      limitations: [
        '需要配置 OLLAMA_BASE_URL 环境变量',
        '需要确保网络可以访问远程 Ollama 服务器',
        '不支持流式响应(返回完整结果)',
        'API Key 通过 Bearer Token 方式传递'
      ]
    };
  },

  /**
   * JSON-Schema-style description of the accepted call parameters and of the
   * environment variables this tool reads.
   * @returns {object} schema with `parameters` and `environment` sections
   */
  getSchema() {
    return {
      parameters: {
        type: 'object',
        properties: {
          method: {
            type: 'string',
            description: '操作方法',
            enum: ['list_models', 'chat'],
            default: 'list_models'
          },
          // Parameter for the list_models method
          only_remote: {
            type: 'boolean',
            description: '只显示云端模型信息(仅用于 list_models 方法)',
            default: false
          },
          // Parameters for the chat method
          model: {
            type: 'string',
            description: '要使用的模型名称(chat 方法必需),例如 "llama3", "deepseek-coder"'
          },
          message: {
            type: 'string',
            description: '发送给模型的提示词或问题(chat 方法必需)'
          },
          system_prompt: {
            type: 'string',
            description: '可选的系统级指令(仅用于 chat 方法)'
          },
          temperature: {
            type: 'number',
            description: '模型温度,0-1之间(仅用于 chat 方法)',
            minimum: 0,
            maximum: 1,
            default: 0.7
          }
        },
        required: ['method']
      },
      environment: {
        type: 'object',
        properties: {
          OLLAMA_BASE_URL: {
            type: 'string',
            description: 'Ollama 服务器基础 URL,例如 http://localhost:11434',
            default: 'http://localhost:11434'
          },
          OLLAMA_API_KEY: {
            type: 'string',
            description: 'Ollama API 密钥(可选),用于 Bearer Token 认证',
            default: ''
          }
        },
        required: []
      }
    };
  },

  /**
   * Business-level error catalogue: maps error-message patterns (the `match`
   * regex is applied to thrown error messages) to a description, a suggested
   * fix, and whether the operation is worth retrying.
   * @returns {Array<object>} list of error descriptors
   */
  getBusinessErrors() {
    return [
      {
        code: 'CONNECTION_FAILED',
        description: '连接远程 Ollama 服务器失败',
        match: /连接失败|Connection Failed|ECONNREFUSED|ENOTFOUND|ETIMEDOUT/i,
        solution: '请检查 OLLAMA_BASE_URL 配置和网络连接',
        retryable: true
      },
      {
        code: 'API_ERROR',
        description: 'Ollama API 返回错误',
        match: /Error: Ollama API responded with status/i,
        solution: '请检查 API 密钥和服务器状态',
        retryable: true
      },
      {
        code: 'INVALID_RESPONSE',
        description: '无效的 API 响应',
        match: /无效的响应|Invalid response|No content returned/i,
        solution: '请检查服务器响应格式是否正确',
        retryable: true
      },
      {
        code: 'MISSING_PARAMETER',
        description: '缺少必需参数',
        match: /缺少必需参数|Missing required parameter/i,
        solution: '请检查方法参数是否完整',
        retryable: false
      },
      {
        code: 'NO_MODELS_AVAILABLE',
        description: '没有可用的模型',
        match: /当前 Ollama 服务器上没有可用的模型/i,
        solution: '请先在 Ollama 服务器上下载模型(使用 ollama pull <模型名>)',
        retryable: false
      }
    ];
  },

  /**
   * Format a byte count as a human-readable size (e.g. 1536 -> "1.5 KB").
   * @param {number} bytes - size in bytes; 0 yields "0 B"
   * @returns {string} formatted size with unit
   */
  formatSize(bytes) {
    if (bytes === 0) return '0 B';
    const k = 1024;
    const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
  },

  /**
   * Resolve the Ollama base URL from the tool environment, defaulting to
   * http://localhost:11434; a single trailing slash is stripped.
   * NOTE(review): `this.api` is presumably injected by the tool runtime
   * before execution - verify against the tool loader.
   * @returns {string} base URL without trailing slash
   */
  getOllamaBaseUrl() {
    const { api } = this;
    const baseUrl = api.environment.get('OLLAMA_BASE_URL') || 'http://localhost:11434';
    // Strip a trailing slash so endpoint paths can be appended safely.
    return baseUrl.replace(/\/$/, '');
  },

  /**
   * Resolve the optional Ollama API key from the tool environment.
   * @returns {string} API key, or '' when not configured
   */
  getOllamaApiKey() {
    const { api } = this;
    return api.environment.get('OLLAMA_API_KEY') || '';
  },

  /**
   * Build the HTTP headers for Ollama requests; adds a Bearer Authorization
   * header only when an API key is configured.
   * @returns {object} header map
   */
  buildHeaders() {
    const headers = {
      'Content-Type': 'application/json'
    };

    const apiKey = this.getOllamaApiKey();
    if (apiKey) {
      headers['Authorization'] = `Bearer ${apiKey}`;
    }

    return headers;
  },

  /**
   * List the models available on the remote Ollama server via GET /api/tags.
   * @param {object} params
   * @param {boolean} [params.only_remote=false] - show only cloud models
   * @returns {Promise<object>} { success, total, displayed, only_remote, models }
   * @throws {Error} on connection failure or non-OK API response
   */
  async listModels(params) {
    const { api } = this;

    api?.logger?.info('开始列出 Ollama 模型', {
      only_remote: params.only_remote || false
    });

    try {
      const baseUrl = this.getOllamaBaseUrl();
      const url = `${baseUrl}/api/tags`;
      const headers = this.buildHeaders();

      api?.logger?.debug('请求 URL', { url });

      const response = await fetch(url, {
        method: 'GET',
        headers
      });

      if (!response.ok) {
        const errorText = await response.text().catch(() => '');
        throw new Error(`Ollama API responded with status ${response.status}: ${response.statusText}. ${errorText}`);
      }

      const data = await response.json();
      const models = data.models || [];

      if (models.length === 0) {
        return {
          success: true,
          message: '当前 Ollama 服务器上没有可用的模型。\n\n您可以通过以下方式获取模型:\n1. 本地模型:使用 `ollama pull <模型名>` 下载模型\n2. 云端模型:配置 Ollama Cloud 账户或代理服务',
          models: [],
          total: 0
        };
      }

      // Normalize the raw model records.
      const modelList = models.map((model) => {
        const name = model.name;
        const size = model.size;
        const modifiedAt = model.modified_at;

        // Heuristic cloud-model detection based on common naming patterns.
        const isRemoteModel = name.includes('cloud') ||
          name.includes('online') ||
          name.includes('api') ||
          name.includes('remote') ||
          (name.includes('llama3') && !name.includes(':'));

        return {
          name,
          size: this.formatSize(size),
          sizeBytes: size,
          modifiedAt,
          isRemoteModel
        };
      });

      // When only_remote is requested, keep cloud models only.
      const filteredModels = params.only_remote
        ? modelList.filter(model => model.isRemoteModel)
        : modelList;

      // Fall back to the full list when the filter matched nothing,
      // so the caller always sees something useful.
      const displayModels = filteredModels.length > 0 ? filteredModels : modelList;

      api?.logger?.info('模型列表获取成功', {
        total: models.length,
        displayed: displayModels.length,
        only_remote: params.only_remote || false
      });

      return {
        success: true,
        total: models.length,
        displayed: displayModels.length,
        only_remote: params.only_remote || false,
        models: displayModels.map((model, index) => ({
          index: index + 1,
          name: model.name,
          size: model.size,
          modifiedAt: model.modifiedAt,
          type: model.isRemoteModel ? '☁️ 云端' : '💾 本地'
        }))
      };

    } catch (error) {
      api?.logger?.error('列出模型失败', {
        error: error.message,
        stack: error.stack
      });
      throw error;
    }
  },

  /**
   * Send a chat request to the remote Ollama server via the OpenAI-compatible
   * POST /v1/chat/completions endpoint (non-streaming).
   * @param {object} params
   * @param {string} params.model - model name (required)
   * @param {string} params.message - user prompt (required)
   * @param {string} [params.system_prompt] - optional system instruction
   * @param {number} [params.temperature=0.7] - sampling temperature, 0-1
   * @returns {Promise<object>} { success, model, reply, usage, finishReason }
   * @throws {Error} when required parameters are missing or the API fails
   */
  async chat(params) {
    const { api } = this;

    // Validate required parameters up front.
    if (!params.model) {
      throw new Error('缺少必需参数: model');
    }
    if (!params.message) {
      throw new Error('缺少必需参数: message');
    }

    api?.logger?.info('开始与 Ollama 对话', {
      model: params.model,
      hasSystemPrompt: !!params.system_prompt,
      // `??` (not `||`) so an explicit temperature of 0 is reported as-is.
      temperature: params.temperature ?? 0.7
    });

    try {
      const baseUrl = this.getOllamaBaseUrl();
      const url = `${baseUrl}/v1/chat/completions`;
      const headers = this.buildHeaders();

      // Build the request body in OpenAI chat-completions format.
      const body = {
        model: params.model,
        messages: [
          ...(params.system_prompt ? [{ role: 'system', content: params.system_prompt }] : []),
          { role: 'user', content: params.message }
        ],
        stream: false, // MCP tools need the full result at once; streaming is disabled.
        options: {
          // Fixed: `|| 0.7` silently replaced a valid temperature of 0
          // (the schema allows minimum 0); `??` keeps 0 and only defaults
          // when the parameter is null/undefined.
          temperature: params.temperature ?? 0.7
        }
      };

      api?.logger?.debug('请求配置', {
        url,
        model: params.model,
        messageLength: params.message.length,
        hasSystemPrompt: !!params.system_prompt
      });

      const response = await fetch(url, {
        method: 'POST',
        headers,
        body: JSON.stringify(body)
      });

      if (!response.ok) {
        const errorText = await response.text().catch(() => '');
        throw new Error(`Ollama API responded with status ${response.status}: ${response.statusText}. ${errorText}`);
      }

      const data = await response.json();
      const reply = data.choices?.[0]?.message?.content || 'No content returned';

      api?.logger?.info('对话成功', {
        model: params.model,
        replyLength: reply.length
      });

      return {
        success: true,
        model: params.model,
        reply: reply,
        usage: data.usage || null,
        finishReason: data.choices?.[0]?.finish_reason || null
      };

    } catch (error) {
      api?.logger?.error('对话失败', {
        error: error.message,
        stack: error.stack,
        model: params.model
      });
      throw error;
    }
  },

  /**
   * Tool entry point: validates `method` and dispatches to listModels or chat.
   * @param {object} params - call parameters (see getSchema)
   * @returns {Promise<object>} result of the dispatched method
   * @throws {Error} on missing/unsupported method or downstream failure
   */
  async execute(params) {
    const { api } = this;

    // Log execution start.
    api?.logger?.info('执行开始', {
      // NOTE(review): __toolName is not defined on this object - presumably
      // injected by the tool loader at runtime; verify.
      tool: this.__toolName,
      method: params.method,
      params: Object.keys(params)
    });

    try {
      // Parameter validation.
      if (!params.method) {
        throw new Error('缺少必需参数: method');
      }

      // Dispatch to the requested operation.
      switch (params.method) {
        case 'list_models':
          return await this.listModels(params);

        case 'chat':
          return await this.chat(params);

        default:
          throw new Error(`不支持的方法: ${params.method}`);
      }

    } catch (error) {
      // Error handling and logging.
      api?.logger?.error('执行失败', {
        error: error.message,
        stack: error.stack,
        method: params.method
      });
      throw error;
    }
  }
};

export default ollamaRemoteTool;
|
|
421
|
+
|