@simonyea/holysheep-cli 1.6.9 → 1.6.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -218,6 +218,8 @@ A: OpenClaw 需要 Node.js 20+,运行 `node --version` 确认版本后重试
218
218
 
219
219
  ## Changelog
220
220
 
221
+ - **v1.6.11** — OpenClaw 新增本地 HolySheep Bridge,统一暴露单一 `holysheep` provider 以支持自由切换 GPT / Claude / MiniMax;同时保留用户所选默认模型,不再强制 GPT-5.4 作为 primary
222
+ - **v1.6.10** — 将可运行的 OpenClaw runtime(含 npx 回退)视为已安装,避免 Windows/Node 环境下重复提示安装;同时修复 Droid CLI 的 GPT `/v1` 接入地址并同步写入 `~/.factory/config.json`
221
223
  - **v1.6.9** — 保留 OpenClaw 的 MiniMax 配置,并为 MiniMax 使用独立 provider id,避免与 Claude provider 冲突;在 OpenClaw 2026.3.13 下改为提示精确 `/model` 切换命令,而不是停止配置 MiniMax
222
224
  - **v1.6.8** — 修复 Codex 重复写入 `config.toml` 导致的 duplicate key,并修复 OpenClaw 在 Windows 下的安装检测;针对 OpenClaw 2026.3.13 的模型路由回归,临时跳过 MiniMax 避免 `model not allowed`
223
225
  - **v1.6.7** — OpenClaw 配置新增 `MiniMax-M2.7-highspeed`,并补齐节点迁移脚本中的 SSH 代理账号创建逻辑
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@simonyea/holysheep-cli",
3
- "version": "1.6.9",
3
+ "version": "1.6.11",
4
4
  "description": "Claude Code/Cursor/Cline API relay for China — ¥1=$1, WeChat/Alipay payment, no credit card, no VPN. One command setup for all AI coding tools.",
5
5
  "keywords": [
6
6
  "openai-china",
package/src/index.js CHANGED
@@ -154,6 +154,19 @@ program
154
154
  })
155
155
  })
156
156
 
157
// ── openclaw-bridge ──────────────────────────────────────────────────────────
// Registers the subcommand that runs the local HolySheep bridge OpenClaw's
// single `holysheep` provider talks to. The bridge module is required
// lazily inside the action so plain `--help` stays fast.
program
  .command('openclaw-bridge')
  .description('启动 HolySheep 的 OpenClaw 本地桥接服务')
  .option('--port <port>', '指定桥接服务端口')
  .action((opts) => {
    const { startBridge } = require('./tools/openclaw-bridge')
    const port = opts.port ? Number(opts.port) : null
    startBridge({ port, host: '127.0.0.1' })
  })
169
+
157
170
  // 默认:无命令时显示帮助 + 提示 setup
158
171
  program
159
172
  .action(() => {
@@ -3,7 +3,7 @@
3
3
  * 配置文件: ~/.factory/settings.json
4
4
  *
5
5
  * 使用 Droid 原生 customModels 配置 HolySheep 的多个模型入口:
6
- * - GPT 走 OpenAI 兼容入口: https://api.holysheep.ai/openai
6
+ * - GPT 走 OpenAI 兼容入口: https://api.holysheep.ai/v1
7
7
  * - Claude 走 Anthropic 入口: https://api.holysheep.ai
8
8
  * - MiniMax 走 Anthropic 入口: https://api.holysheep.ai/minimax
9
9
  */
@@ -13,12 +13,13 @@ const os = require('os')
13
13
 
14
14
  const CONFIG_DIR = path.join(os.homedir(), '.factory')
15
15
  const SETTINGS_FILE = path.join(CONFIG_DIR, 'settings.json')
16
+ const LEGACY_CONFIG_FILE = path.join(CONFIG_DIR, 'config.json')
16
17
 
17
18
  const DEFAULT_MODELS = [
18
19
  {
19
20
  model: 'gpt-5.4',
20
21
  id: 'custom:gpt-5.4-0',
21
- baseUrlSuffix: '/openai',
22
+ baseUrlSuffix: '',
22
23
  displayName: 'GPT-5.4',
23
24
  provider: 'openai',
24
25
  },
@@ -66,6 +67,24 @@ function writeSettings(data) {
66
67
  fs.writeFileSync(SETTINGS_FILE, JSON.stringify(data, null, 2), 'utf8')
67
68
  }
68
69
 
70
// Best-effort read of the legacy ~/.factory/config.json (kept in sync
// alongside settings.json); returns {} when absent or unparsable.
function readLegacyConfig() {
  try {
    if (fs.existsSync(LEGACY_CONFIG_FILE)) {
      const raw = fs.readFileSync(LEGACY_CONFIG_FILE, 'utf8')
      return JSON.parse(raw)
    }
  } catch {}
  return {}
}
78
+
79
// Persist the legacy ~/.factory/config.json, creating the directory first.
function writeLegacyConfig(data) {
  fs.mkdirSync(CONFIG_DIR, { recursive: true })
  const serialized = JSON.stringify(data, null, 2)
  fs.writeFileSync(LEGACY_CONFIG_FILE, serialized, 'utf8')
}
83
+
84
// A customModels entry belongs to us when its baseUrl targets api.holysheep.ai.
function isHolySheepModel(item) {
  const baseUrl = item?.baseUrl
  return typeof baseUrl === 'string' && baseUrl.includes('api.holysheep.ai')
}
87
+
69
88
  function normalizeSelectedModels(selectedModels) {
70
89
  const selected = new Set(
71
90
  Array.isArray(selectedModels) && selectedModels.length > 0
@@ -84,13 +103,17 @@ function normalizeSelectedModels(selectedModels) {
84
103
  return models.length > 0 ? models : DEFAULT_MODELS.map((item, index) => ({ ...item, index }))
85
104
  }
86
105
 
87
- function buildCustomModels(apiKey, baseUrlAnthropic, selectedModels) {
88
- const rootUrl = String(baseUrlAnthropic || '').replace(/\/+$/, '')
106
+ function buildCustomModels(apiKey, baseUrlAnthropic, baseUrlOpenAI, selectedModels) {
107
+ const anthropicRootUrl = String(baseUrlAnthropic || '').replace(/\/+$/, '')
108
+ const openaiRootUrl = String(baseUrlOpenAI || '').replace(/\/+$/, '')
89
109
  return normalizeSelectedModels(selectedModels).map((item) => ({
90
110
  model: item.model,
91
111
  id: item.id,
92
112
  index: item.index,
93
- baseUrl: `${rootUrl}${item.baseUrlSuffix}`,
113
+ baseUrl:
114
+ item.provider === 'openai'
115
+ ? `${openaiRootUrl}${item.baseUrlSuffix}`
116
+ : `${anthropicRootUrl}${item.baseUrlSuffix}`,
94
117
  apiKey,
95
118
  displayName: item.displayName,
96
119
  maxOutputTokens: 64000,
@@ -108,25 +131,37 @@ module.exports = {
108
131
  isConfigured() {
109
132
  const settings = readSettings()
110
133
  const customModels = Array.isArray(settings.customModels) ? settings.customModels : []
111
- return customModels.some((item) =>
112
- typeof item.baseUrl === 'string' && item.baseUrl.includes('api.holysheep.ai')
113
- )
134
+ if (customModels.some(isHolySheepModel)) return true
135
+
136
+ const legacy = readLegacyConfig()
137
+ const legacyModels = Array.isArray(legacy.customModels) ? legacy.customModels : []
138
+ return legacyModels.some(isHolySheepModel)
114
139
  },
115
- configure(apiKey, baseUrlAnthropic, _baseUrlOpenAI, _primaryModel, selectedModels) {
140
+ configure(apiKey, baseUrlAnthropic, baseUrlOpenAI, _primaryModel, selectedModels) {
141
+ const nextModels = buildCustomModels(apiKey, baseUrlAnthropic, baseUrlOpenAI, selectedModels)
142
+
116
143
  const settings = readSettings()
117
144
  const preservedModels = Array.isArray(settings.customModels)
118
- ? settings.customModels.filter(
119
- (item) => !(typeof item.baseUrl === 'string' && item.baseUrl.includes('api.holysheep.ai'))
120
- )
145
+ ? settings.customModels.filter((item) => !isHolySheepModel(item))
121
146
  : []
122
-
123
147
  settings.customModels = [
124
- ...buildCustomModels(apiKey, baseUrlAnthropic, selectedModels),
148
+ ...nextModels,
125
149
  ...preservedModels,
126
150
  ]
127
151
  settings.logoAnimation = 'off'
128
152
  writeSettings(settings)
129
153
 
154
+ const legacy = readLegacyConfig()
155
+ const preservedLegacyModels = Array.isArray(legacy.customModels)
156
+ ? legacy.customModels.filter((item) => !isHolySheepModel(item))
157
+ : []
158
+ legacy.customModels = [
159
+ ...nextModels,
160
+ ...preservedLegacyModels,
161
+ ]
162
+ legacy.logoAnimation = 'off'
163
+ writeLegacyConfig(legacy)
164
+
130
165
  return {
131
166
  file: SETTINGS_FILE,
132
167
  hot: true,
@@ -135,11 +170,15 @@ module.exports = {
135
170
  reset() {
136
171
  const settings = readSettings()
137
172
  if (Array.isArray(settings.customModels)) {
138
- settings.customModels = settings.customModels.filter(
139
- (item) => !(typeof item.baseUrl === 'string' && item.baseUrl.includes('api.holysheep.ai'))
140
- )
173
+ settings.customModels = settings.customModels.filter((item) => !isHolySheepModel(item))
141
174
  }
142
175
  writeSettings(settings)
176
+
177
+ const legacy = readLegacyConfig()
178
+ if (Array.isArray(legacy.customModels)) {
179
+ legacy.customModels = legacy.customModels.filter((item) => !isHolySheepModel(item))
180
+ }
181
+ writeLegacyConfig(legacy)
143
182
  },
144
183
  getConfigPath() { return SETTINGS_FILE },
145
184
  hint: '已写入 ~/.factory/settings.json;重启 Droid 后可见 HolySheep 模型列表',
@@ -0,0 +1,545 @@
1
+ #!/usr/bin/env node
2
+ 'use strict'
3
+
4
+ const fs = require('fs')
5
+ const http = require('http')
6
+ const path = require('path')
7
+ const os = require('os')
8
+ const fetch = global.fetch || require('node-fetch')
9
+
10
+ const OPENCLAW_DIR = path.join(os.homedir(), '.openclaw')
11
+ const BRIDGE_CONFIG_FILE = path.join(OPENCLAW_DIR, 'holysheep-bridge.json')
12
+
13
// Load the bridge configuration JSON from disk. Throws when the file is
// missing or malformed — callers surface that failure themselves.
function readBridgeConfig(configPath = BRIDGE_CONFIG_FILE) {
  const raw = fs.readFileSync(configPath, 'utf8')
  return JSON.parse(raw)
}
16
+
17
// Parse the bridge CLI flags (--port, --host, --config) from an argv slice.
// Unknown flags are ignored.
// Fix: an invalid or missing --port value now falls back to null (same as
// "not given") instead of propagating NaN downstream; the positive-integer
// check mirrors getConfiguredBridgePort's validation in tools/openclaw.js.
function parseArgs(argv) {
  const args = { port: null, host: '127.0.0.1', config: BRIDGE_CONFIG_FILE }
  for (let i = 0; i < argv.length; i++) {
    const value = argv[i]
    if (value === '--port') {
      const port = Number(argv[++i])
      args.port = Number.isInteger(port) && port > 0 ? port : null
    } else if (value === '--host') {
      args.host = argv[++i]
    } else if (value === '--config') {
      args.config = argv[++i]
    }
  }
  return args
}
27
+
28
// Buffer the request body and parse it as JSON.
// Resolves with {} for an empty body; rejects on parse errors, stream
// errors, or a body over 5 MiB (the request is destroyed in that case).
function readJsonBody(req) {
  const MAX_BODY_CHARS = 5 * 1024 * 1024
  return new Promise((resolve, reject) => {
    let buffered = ''
    req.on('data', (chunk) => {
      buffered += chunk
      if (buffered.length > MAX_BODY_CHARS) {
        reject(new Error('Request body too large'))
        req.destroy()
      }
    })
    req.on('end', () => {
      if (!buffered) {
        resolve({})
        return
      }
      try {
        resolve(JSON.parse(buffered))
      } catch (error) {
        reject(error)
      }
    })
    req.on('error', reject)
  })
}
49
+
50
// Serialize `payload` as JSON and finish the response with `statusCode`.
function sendJson(res, statusCode, payload) {
  const headers = {
    'content-type': 'application/json; charset=utf-8',
    'cache-control': 'no-store',
  }
  res.writeHead(statusCode, headers)
  res.end(JSON.stringify(payload))
}
57
+
58
// Emit a non-streaming chat.completion as a minimal OpenAI-style SSE
// stream: one delta chunk carrying the whole message, one finish chunk
// with usage, then the [DONE] sentinel.
function sendOpenAIStream(res, payload) {
  const choice = payload.choices?.[0] || {}
  const message = choice.message || {}
  const created = payload.created || Math.floor(Date.now() / 1000)

  res.writeHead(200, {
    'content-type': 'text/event-stream; charset=utf-8',
    'cache-control': 'no-cache, no-transform',
    connection: 'keep-alive',
  })

  const delta = { role: 'assistant' }
  if (message.content) delta.content = message.content
  if (message.tool_calls) delta.tool_calls = message.tool_calls

  const contentChunk = {
    id: payload.id,
    object: 'chat.completion.chunk',
    created,
    model: payload.model,
    choices: [{ index: 0, delta, finish_reason: null }],
  }
  const doneChunk = {
    id: payload.id,
    object: 'chat.completion.chunk',
    created,
    model: payload.model,
    choices: [{ index: 0, delta: {}, finish_reason: choice.finish_reason || 'stop' }],
    usage: payload.usage,
  }

  for (const chunk of [contentChunk, doneChunk]) {
    res.write(`data: ${JSON.stringify(chunk)}\n\n`)
  }
  res.end('data: [DONE]\n\n')
}
98
+
99
// Flatten arbitrary message content into a plain string: strings pass
// through; arrays are flattened recursively and newline-joined (empty
// pieces dropped); objects yield their string `text` or `content` field
// when present; null/undefined become ''; anything else is String()-coerced.
function normalizeText(value) {
  if (value == null) return ''
  if (typeof value === 'string') return value
  if (Array.isArray(value)) {
    return value.map(normalizeText).filter(Boolean).join('\n')
  }
  if (typeof value === 'object') {
    if (typeof value.text === 'string') return value.text
    if (typeof value.content === 'string') return value.content
  }
  return String(value)
}
108
+
109
// Split a base64 `data:` URL into its media type and payload.
// Returns null when the input is not a base64 data URL.
function parseDataUrl(url) {
  const m = /^data:([^;]+);base64,(.+)$/.exec(String(url || ''))
  return m ? { mediaType: m[1], data: m[2] } : null
}
114
+
115
// Convert OpenAI-style message content (string or part array) into
// Anthropic content blocks: text parts map to `text` blocks, base64
// image_url parts map to `image` blocks; everything else is dropped.
function openAIContentToAnthropicBlocks(content) {
  if (typeof content === 'string') return [{ type: 'text', text: content }]
  if (!Array.isArray(content)) return []

  const result = []
  for (const part of content) {
    if (!part) continue
    if (part.type === 'text' && typeof part.text === 'string') {
      result.push({ type: 'text', text: part.text })
    } else if (part.type === 'image_url' && part.image_url?.url) {
      const parsed = parseDataUrl(part.image_url.url)
      if (parsed) {
        result.push({
          type: 'image',
          source: { type: 'base64', media_type: parsed.mediaType, data: parsed.data },
        })
      }
    }
  }
  return result
}
138
+
139
// Append content blocks to the message list, merging into the previous
// message when it has the same role (Anthropic expects alternating roles).
// No-op for an empty block list.
function pushAnthropicMessage(messages, role, blocks) {
  if (blocks.length === 0) return
  const last = messages[messages.length - 1]
  if (last?.role === role) {
    last.content = last.content.concat(blocks)
  } else {
    messages.push({ role, content: blocks })
  }
}
148
+
149
// Convert an OpenAI chat `messages` array to the Anthropic shape: system
// messages are gathered into a single system string, tool results become
// user-role tool_result blocks, assistant tool_calls become tool_use
// blocks, and consecutive same-role messages are merged (via
// pushAnthropicMessage) to satisfy Anthropic's alternating-role rule.
// Returns { system: string | undefined, messages: [...] }.
function convertOpenAIToAnthropicMessages(messages) {
  const anthropicMessages = []
  const systemParts = []

  for (const message of messages || []) {
    if (!message) continue

    if (message.role === 'system') {
      // System content never enters `messages`; only its text pieces are kept.
      const blocks = openAIContentToAnthropicBlocks(message.content)
      if (blocks.length === 0) {
        const text = normalizeText(message.content)
        if (text) systemParts.push(text)
      } else {
        for (const block of blocks) {
          if (block.type === 'text') systemParts.push(block.text)
        }
      }
      continue
    }

    if (message.role === 'tool') {
      // OpenAI tool results map to user-role tool_result blocks keyed by
      // the originating tool_call id.
      pushAnthropicMessage(anthropicMessages, 'user', [{
        type: 'tool_result',
        tool_use_id: message.tool_call_id,
        content: normalizeText(message.content),
      }])
      continue
    }

    if (message.role === 'assistant') {
      const blocks = []
      const textBlocks = openAIContentToAnthropicBlocks(message.content)
      if (textBlocks.length) blocks.push(...textBlocks)
      else if (typeof message.content === 'string' && message.content) blocks.push({ type: 'text', text: message.content })

      // Re-materialize each tool call as a tool_use block: arguments are a
      // JSON string on the OpenAI side, an object on the Anthropic side.
      // Unparsable arguments degrade to an empty input object.
      for (const toolCall of message.tool_calls || []) {
        let input = {}
        try {
          input = JSON.parse(toolCall.function?.arguments || '{}')
        } catch {}
        blocks.push({
          type: 'tool_use',
          id: toolCall.id,
          name: toolCall.function?.name || 'tool',
          input,
        })
      }

      pushAnthropicMessage(anthropicMessages, 'assistant', blocks)
      continue
    }

    // Any other role (typically 'user') becomes a user message; plain text
    // falls back to a single text block.
    const blocks = openAIContentToAnthropicBlocks(message.content)
    if (blocks.length) pushAnthropicMessage(anthropicMessages, 'user', blocks)
    else {
      const text = normalizeText(message.content)
      if (text) pushAnthropicMessage(anthropicMessages, 'user', [{ type: 'text', text }])
    }
  }

  return {
    system: systemParts.join('\n\n').trim() || undefined,
    messages: anthropicMessages,
  }
}
214
+
215
// Map OpenAI function-tool definitions to Anthropic tool definitions.
// Entries that are not function tools, or lack a name, are skipped.
function convertOpenAIToolsToAnthropic(tools) {
  const result = []
  for (const tool of tools || []) {
    if (tool?.type !== 'function' || !tool.function?.name) continue
    result.push({
      name: tool.function.name,
      description: tool.function.description || '',
      input_schema: tool.function.parameters || { type: 'object', properties: {} },
    })
  }
  return result
}
224
+
225
// Translate an OpenAI `tool_choice` value to the Anthropic equivalent:
//   absent / 'auto'            -> { type: 'auto' }
//   'none'                     -> { type: 'none' }  (model must not call tools)
//   'required'                 -> { type: 'any' }
//   { type: 'function', ... }  -> { type: 'tool', name }
// Anything unrecognized falls back to auto.
// Fix: 'none' previously mapped to { type: 'auto', disable_parallel_tool_use: true },
// which still permits tool calls; Anthropic's `none` choice is the real equivalent.
function convertToolChoice(toolChoice) {
  if (!toolChoice || toolChoice === 'auto') return { type: 'auto' }
  if (toolChoice === 'none') return { type: 'none' }
  if (toolChoice === 'required') return { type: 'any' }
  if (toolChoice.type === 'function' && toolChoice.function?.name) {
    return { type: 'tool', name: toolChoice.function.name }
  }
  return { type: 'auto' }
}
234
+
235
// Build an Anthropic Messages API payload from an OpenAI chat-completions
// request body. The upstream call is always non-streaming; the bridge
// re-emits SSE locally when the client asked for streaming.
function buildAnthropicPayload(requestBody) {
  const converted = convertOpenAIToAnthropicMessages(requestBody.messages)
  const maxTokens =
    requestBody.max_tokens ||
    requestBody.max_completion_tokens ||
    requestBody.max_output_tokens ||
    4096

  const payload = {
    model: requestBody.model,
    max_tokens: maxTokens,
    messages: converted.messages,
    stream: false,
  }

  if (converted.system) payload.system = converted.system
  if (requestBody.temperature != null) payload.temperature = requestBody.temperature
  if (requestBody.top_p != null) payload.top_p = requestBody.top_p

  if (typeof requestBody.stop === 'string') {
    payload.stop_sequences = [requestBody.stop]
  } else if (Array.isArray(requestBody.stop) && requestBody.stop.length) {
    payload.stop_sequences = requestBody.stop
  }

  const tools = convertOpenAIToolsToAnthropic(requestBody.tools)
  if (tools.length) payload.tools = tools
  if (requestBody.tool_choice) payload.tool_choice = convertToolChoice(requestBody.tool_choice)

  return payload
}
256
+
257
// Map an Anthropic stop_reason onto an OpenAI finish_reason.
function mapFinishReason(stopReason) {
  switch (stopReason) {
    case 'tool_use':
      return 'tool_calls'
    case 'max_tokens':
      return 'length'
    default:
      return 'stop'
  }
}
262
+
263
// Collect Anthropic `tool_use` blocks as OpenAI tool_calls entries,
// re-serializing each block's input object as a JSON arguments string.
function buildToolCalls(content) {
  return (content || [])
    .filter((block) => block?.type === 'tool_use')
    .map((block) => ({
      id: block.id,
      type: 'function',
      function: {
        name: block.name,
        arguments: JSON.stringify(block.input || {}),
      },
    }))
}
278
+
279
// Convert a non-streaming Anthropic Messages response into an OpenAI
// chat.completion body: text blocks are concatenated, tool_use blocks are
// surfaced as tool_calls, and token usage is remapped. `requestedModel`
// echoes the model the client asked for.
function anthropicToOpenAIResponse(responseBody, requestedModel) {
  const blocks = responseBody.content || []
  const text = blocks
    .filter((block) => block?.type === 'text')
    .map((block) => block.text)
    .join('')
  const toolCalls = buildToolCalls(blocks)

  const message = { role: 'assistant', content: text || null }
  if (toolCalls.length) message.tool_calls = toolCalls

  let usage
  if (responseBody.usage) {
    const promptTokens = responseBody.usage.input_tokens || 0
    const completionTokens = responseBody.usage.output_tokens || 0
    usage = {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
    }
  }

  return {
    id: responseBody.id || `chatcmpl_${Date.now()}`,
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000),
    model: requestedModel,
    choices: [{
      index: 0,
      message,
      finish_reason: mapFinishReason(responseBody.stop_reason),
    }],
    usage,
  }
}
309
+
310
// Choose the upstream route by model-id prefix: claude-* goes to the
// Anthropic route, MiniMax-* to the MiniMax route, and everything else
// (including gpt-*) defaults to the OpenAI-compatible route.
function pickRoute(model) {
  const name = String(model)
  if (name.startsWith('claude-')) return 'anthropic'
  if (name.startsWith('MiniMax-')) return 'minimax'
  return 'openai'
}
316
+
317
// Best-effort parse of an upstream body that may be plain JSON, a
// chat.completion.chunk SSE transcript, or a Responses-API SSE transcript
// (response.output_text.delta / response.completed events). Returns a
// non-streaming chat.completion-shaped object, or null when nothing
// parseable was found.
function parseOpenAIStreamText(text) {
  // Fast path: the body is already a single JSON document.
  try {
    const parsed = JSON.parse(String(text || ''))
    if (parsed && typeof parsed === 'object') return parsed
  } catch {}

  // SSE events are separated by blank lines.
  const blocks = String(text || '').split(/\r?\n\r?\n+/).filter(Boolean)
  let responseCompleted = null
  let finalChunk = null
  let content = ''
  let sawOutputTextDelta = false

  for (const block of blocks) {
    const eventMatch = block.match(/^event:\s*(.+)$/m)
    const dataMatch = block.match(/^data:\s*(.+)$/m)
    if (!dataMatch) continue

    const eventName = eventMatch ? eventMatch[1].trim() : ''
    const payload = dataMatch[1].trim()
    if (!payload || payload === '[DONE]') continue

    // Malformed event payloads are skipped rather than failing the parse.
    let chunk
    try {
      chunk = JSON.parse(payload)
    } catch {
      continue
    }

    // Responses API: accumulate incremental output text.
    if (eventName === 'response.output_text.delta' && typeof chunk.delta === 'string') {
      sawOutputTextDelta = true
      content += chunk.delta
      continue
    }

    // Responses API: content_part.done repeats the full text; only use it
    // when no deltas were seen, to avoid duplicating content.
    if (eventName === 'response.content_part.done' && chunk.part?.type === 'output_text' && typeof chunk.part.text === 'string') {
      if (!sawOutputTextDelta) content += chunk.part.text
      continue
    }

    // Responses API: terminal event carries the full response object; fall
    // back to its aggregated output_text items if no text was collected.
    if (eventName === 'response.completed' && chunk.response) {
      responseCompleted = chunk.response
      if (!content) {
        const outputText = (chunk.response.output || [])
          .flatMap((item) => item?.content || [])
          .filter((item) => item?.type === 'output_text' && typeof item.text === 'string')
          .map((item) => item.text)
          .join('')
        if (outputText) content = outputText
      }
      continue
    }

    // Chat Completions stream: remember the last chunk (for id/model/usage)
    // and accumulate delta or message content.
    finalChunk = chunk
    const choice = chunk.choices?.[0] || {}
    const delta = choice.delta || {}
    if (delta.content) content += delta.content
    else if (choice.message?.content) content += choice.message.content
  }

  // Prefer the Responses-API terminal object when present.
  if (responseCompleted) {
    return {
      id: responseCompleted.id || `chatcmpl_${Date.now()}`,
      object: 'chat.completion',
      created: responseCompleted.created_at || Math.floor(Date.now() / 1000),
      model: responseCompleted.model,
      choices: [{
        index: 0,
        message: { role: 'assistant', content: content || null },
        finish_reason: responseCompleted.status === 'completed' ? 'stop' : 'length',
      }],
      usage: responseCompleted.usage,
    }
  }

  if (!finalChunk) return null

  return {
    id: finalChunk.id || `chatcmpl_${Date.now()}`,
    object: 'chat.completion',
    created: finalChunk.created || Math.floor(Date.now() / 1000),
    model: finalChunk.model,
    choices: [{
      index: 0,
      message: { role: 'assistant', content: content || null },
      finish_reason: finalChunk.choices?.[0]?.finish_reason || 'stop',
    }],
    usage: finalChunk.usage,
  }
}
406
+
407
// Forward an OpenAI-style chat request to the configured OpenAI-compatible
// upstream and write the upstream answer to `res`.
// NOTE(review): the whole upstream body is buffered via `upstream.text()`
// even when the client asked for streaming, so SSE is only passed through
// after the upstream finishes — functional, but no incremental latency.
async function relayOpenAIRequest(requestBody, config, res) {
  const upstreamBody = {
    ...requestBody,
    // Coerce to a strict boolean so upstream never sees truthy junk.
    stream: requestBody.stream === true,
  }
  const upstream = await fetch(`${config.baseUrlOpenAI.replace(/\/+$/, '')}/chat/completions`, {
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      authorization: `Bearer ${config.apiKey}`,
      'user-agent': 'holysheep-openclaw-bridge/1.0',
    },
    body: JSON.stringify(upstreamBody),
  })

  const text = await upstream.text()
  if (!requestBody.stream) {
    // The parser accepts plain JSON as well as SSE transcripts; normalize
    // to a single chat.completion JSON body whenever possible.
    const parsed = parseOpenAIStreamText(text)
    if (parsed) return sendJson(res, upstream.status, parsed)
  }

  // Streaming (or unparseable) path: mirror the upstream body and its key
  // headers verbatim.
  res.writeHead(upstream.status, {
    'content-type': upstream.headers.get('content-type') || 'application/json; charset=utf-8',
    'cache-control': upstream.headers.get('cache-control') || 'no-store',
  })
  res.end(text)
}
434
+
435
// Forward a chat request to the Anthropic-compatible upstream ('anthropic'
// or 'minimax' route) and answer the client in OpenAI format — as a
// minimal SSE stream when the client requested streaming. The upstream
// call itself is always non-streaming (see buildAnthropicPayload).
async function relayAnthropicRequest(requestBody, config, route, res) {
  const payload = buildAnthropicPayload(requestBody)
  // MiniMax shares the Anthropic wire format but lives under /minimax.
  const baseUrl = route === 'minimax'
    ? `${config.baseUrlAnthropic.replace(/\/+$/, '')}/minimax/v1/messages`
    : `${config.baseUrlAnthropic.replace(/\/+$/, '')}/v1/messages`

  const upstream = await fetch(baseUrl, {
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      'x-api-key': config.apiKey,
      'anthropic-version': '2023-06-01',
      'user-agent': 'holysheep-openclaw-bridge/1.0',
    },
    body: JSON.stringify(payload),
  })

  const text = await upstream.text()
  let body
  try {
    body = JSON.parse(text)
  } catch {
    // Wrap non-JSON upstream bodies so the client still receives JSON.
    body = { error: { message: text || 'Invalid upstream response' } }
  }

  if (!upstream.ok) {
    // Propagate upstream errors with their original status code.
    return sendJson(res, upstream.status, body)
  }

  const openaiBody = anthropicToOpenAIResponse(body, requestBody.model)
  if (requestBody.stream) return sendOpenAIStream(res, openaiBody)
  return sendJson(res, 200, openaiBody)
}
468
+
469
// Shape the configured model ids as an OpenAI `GET /v1/models` listing.
function buildModelsResponse(config) {
  const models = config.models || []
  return {
    object: 'list',
    data: models.map((id) => ({ id, object: 'model', owned_by: 'holysheep' })),
  }
}
479
+
480
// Build the local HTTP server fronting the HolySheep upstreams with a
// single OpenAI-compatible surface:
//   OPTIONS *                  -> permissive CORS preflight
//   GET  /health               -> { ok, port, models }
//   GET  /v1/models            -> configured model list
//   POST /v1/chat/completions  -> relayed by model-name prefix (pickRoute)
// The bridge config is re-read on every request, so credential/model
// changes apply without a restart.
// NOTE(review): the server performs no authentication — anything that can
// reach the listen address can spend the configured API key. startBridge
// defaults the bind to 127.0.0.1, but --host can override; confirm it is
// never bound to a public interface.
function createBridgeServer(configPath = BRIDGE_CONFIG_FILE) {
  return http.createServer(async (req, res) => {
    if (req.method === 'OPTIONS') {
      res.writeHead(204, {
        'access-control-allow-origin': '*',
        'access-control-allow-methods': 'GET,POST,OPTIONS',
        'access-control-allow-headers': 'content-type,authorization,x-api-key,anthropic-version',
      })
      return res.end()
    }

    try {
      const config = readBridgeConfig(configPath)
      const url = new URL(req.url, `http://${req.headers.host || '127.0.0.1'}`)

      if (req.method === 'GET' && url.pathname === '/health') {
        return sendJson(res, 200, { ok: true, port: config.port, models: config.models || [] })
      }

      if (req.method === 'GET' && url.pathname === '/v1/models') {
        return sendJson(res, 200, buildModelsResponse(config))
      }

      if (req.method === 'POST' && url.pathname === '/v1/chat/completions') {
        const requestBody = await readJsonBody(req)
        const route = pickRoute(requestBody.model)
        if (route === 'openai') return relayOpenAIRequest(requestBody, config, res)
        return relayAnthropicRequest(requestBody, config, route, res)
      }

      return sendJson(res, 404, { error: { message: 'Not found' } })
    } catch (error) {
      // Any failure (bad config, oversized body, relay error) becomes a
      // JSON 500 instead of tearing down the socket.
      return sendJson(res, 500, { error: { message: error.message || 'Bridge error' } })
    }
  })
}
516
+
517
// Start the local bridge HTTP server from CLI args (or their defaults).
// The bridge config supplies a fallback port; the bound address is logged.
function startBridge(args = parseArgs(process.argv.slice(2))) {
  const config = readBridgeConfig(args.config)
  const host = args.host || '127.0.0.1'
  const port = args.port || config.port
  const server = createBridgeServer(args.config)

  server.listen(port, host, () => {
    process.stdout.write(`HolySheep OpenClaw bridge listening on http://${host}:${port}\n`)
  })

  return server
}
529
+
530
// Allow running this file directly (`node openclaw-bridge.js`) in addition
// to being spawned through the CLI's `openclaw-bridge` subcommand.
if (require.main === module) {
  startBridge()
}

// Public surface: startBridge is used by ../index.js (CLI command) and
// BRIDGE_CONFIG_FILE by tools/openclaw.js; the rest supports testing.
module.exports = {
  BRIDGE_CONFIG_FILE,
  buildAnthropicPayload,
  anthropicToOpenAIResponse,
  buildModelsResponse,
  createBridgeServer,
  parseArgs,
  parseOpenAIStreamText,
  pickRoute,
  readBridgeConfig,
  startBridge,
}
@@ -9,16 +9,18 @@ const path = require('path')
9
9
  const os = require('os')
10
10
  const { spawnSync, spawn, execSync } = require('child_process')
11
11
  const { commandExists } = require('../utils/which')
12
+ const { BRIDGE_CONFIG_FILE } = require('./openclaw-bridge')
12
13
 
13
14
  const OPENCLAW_DIR = path.join(os.homedir(), '.openclaw')
14
15
  const CONFIG_FILE = path.join(OPENCLAW_DIR, 'openclaw.json')
15
16
  const isWin = process.platform === 'win32'
17
+ const DEFAULT_BRIDGE_PORT = 18788
16
18
  const DEFAULT_GATEWAY_PORT = 18789
17
- const MAX_PORT_SCAN = 20
19
+ const MAX_PORT_SCAN = 40
18
20
  const OPENCLAW_DEFAULT_MODEL = 'gpt-5.4'
19
21
  const OPENCLAW_DEFAULT_CLAUDE_MODEL = 'claude-sonnet-4-6'
20
22
  const OPENCLAW_DEFAULT_MINIMAX_MODEL = 'MiniMax-M2.7-highspeed'
21
- const OPENCLAW_ROUTING_REGRESSION_VERSION = /^2026\.3\.13(?:\D|$)/
23
+ const OPENCLAW_PROVIDER_NAME = 'holysheep'
22
24
 
23
25
  function getOpenClawBinaryCandidates() {
24
26
  return isWin ? ['openclaw.cmd', 'openclaw'] : ['openclaw']
@@ -144,16 +146,67 @@ function detectRuntime() {
144
146
  return { available: false, via: null, command: null, version: null }
145
147
  }
146
148
 
147
- function isRoutingRegressionVersion(version) {
148
- return OPENCLAW_ROUTING_REGRESSION_VERSION.test(String(version || '').trim())
149
// Best-effort read of the bridge config written during configure();
// returns {} when the file is missing or unparsable.
function readBridgeConfig() {
  try {
    if (fs.existsSync(BRIDGE_CONFIG_FILE)) {
      const raw = fs.readFileSync(BRIDGE_CONFIG_FILE, 'utf8')
      return JSON.parse(raw)
    }
  } catch {}
  return {}
}
157
+
158
// Persist the bridge config under ~/.openclaw, creating the directory first.
function writeBridgeConfig(data) {
  fs.mkdirSync(OPENCLAW_DIR, { recursive: true })
  const serialized = JSON.stringify(data, null, 2)
  fs.writeFileSync(BRIDGE_CONFIG_FILE, serialized, 'utf8')
}
162
+
163
// Return the configured bridge port when it is a positive integer,
// otherwise the packaged default.
function getConfiguredBridgePort(config = readBridgeConfig()) {
  const candidate = Number(config?.port)
  if (Number.isInteger(candidate) && candidate > 0) return candidate
  return DEFAULT_BRIDGE_PORT
}
167
+
168
// OpenAI-compatible base URL of the local bridge for the given port.
function getBridgeBaseUrl(port = getConfiguredBridgePort()) {
  return `http://127.0.0.1:${port}/v1`
}
150
171
 
151
- function getRoutingRegressionWarning(runtimeVersion, minimaxModelRef) {
152
- if (!isRoutingRegressionVersion(runtimeVersion) || !minimaxModelRef) {
153
- return ''
172
// Poll the bridge's /health endpoint (curl on POSIX, Invoke-WebRequest on
// Windows) until it answers, up to 10 attempts. Returns true when healthy.
// Fixes vs. the previous version: it probes FIRST (an already-running
// bridge is detected immediately instead of after a mandatory 500 ms), and
// the inter-attempt delay uses Atomics.wait instead of a busy-wait loop
// that pinned a CPU core for the whole wait.
function waitForBridge(port) {
  const probe = () => {
    try {
      execSync(
        isWin
          ? `powershell -NonInteractive -Command "try{(Invoke-WebRequest -Uri http://127.0.0.1:${port}/health -TimeoutSec 1 -UseBasicParsing).StatusCode}catch{exit 1}"`
          : `curl -sf http://127.0.0.1:${port}/health -o /dev/null --max-time 1`,
        { stdio: 'ignore', timeout: 3000 }
      )
      return true
    } catch {
      return false
    }
  }

  for (let attempt = 0; attempt < 10; attempt++) {
    if (probe()) return true
    // Synchronous 500 ms sleep without spinning the CPU.
    Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, 500)
  }
  return false
}
190
+
191
// Ensure a bridge is listening on `port`: reuse a healthy one, otherwise
// spawn a detached `openclaw-bridge` child through our own CLI entry point
// and wait for it to come up. Returns true when the bridge is reachable.
function startBridge(port) {
  if (waitForBridge(port)) return true

  const cliEntry = path.join(__dirname, '..', 'index.js')
  const child = spawn(process.execPath, [cliEntry, 'openclaw-bridge', '--port', String(port)], {
    detached: true,
    stdio: 'ignore',
  })
  child.unref()

  return waitForBridge(port)
}
202
+
203
// Shell command users can run to start the bridge manually.
function getBridgeCommand(port = getConfiguredBridgePort()) {
  return `hs openclaw-bridge --port ${port}`
}
206
+
207
// Resolve the primary model: an explicit choice wins, then the first
// selected model, then the packaged default.
function pickPrimaryModel(primaryModel, selectedModels) {
  if (primaryModel) return primaryModel
  const models = Array.isArray(selectedModels) ? selectedModels : []
  return models[0] || OPENCLAW_DEFAULT_MODEL
}
158
211
 
159
212
  function readConfig() {
@@ -245,11 +298,6 @@ function getDashboardCommand() {
245
298
  return `${runtime} dashboard --no-open`
246
299
  }
247
300
 
248
- function buildProviderName(baseUrl, prefix) {
249
- const hostname = new URL(baseUrl).hostname.replace(/\./g, '-')
250
- return `${prefix}-${hostname}`
251
- }
252
-
253
301
  function buildModelEntry(id) {
254
302
  return {
255
303
  id,
@@ -261,79 +309,51 @@ function buildModelEntry(id) {
261
309
  }
262
310
  }
263
311
 
264
- function buildManagedPlan(apiKey, baseUrlAnthropic, baseUrlOpenAI, selectedModels) {
312
+ function normalizeRequestedModels(selectedModels) {
265
313
  const requestedModels = Array.isArray(selectedModels) && selectedModels.length > 0
266
- ? selectedModels
314
+ ? [...selectedModels]
267
315
  : [OPENCLAW_DEFAULT_MODEL, OPENCLAW_DEFAULT_CLAUDE_MODEL, OPENCLAW_DEFAULT_MINIMAX_MODEL]
268
316
 
269
- const openaiModels = requestedModels.filter((model) => model.startsWith('gpt-'))
270
- if (!openaiModels.includes(OPENCLAW_DEFAULT_MODEL)) {
271
- openaiModels.unshift(OPENCLAW_DEFAULT_MODEL)
272
- }
273
-
274
- const claudeModels = requestedModels.filter((model) => model.startsWith('claude-'))
275
- if (claudeModels.length === 0) {
276
- claudeModels.push(OPENCLAW_DEFAULT_CLAUDE_MODEL)
277
- }
278
-
279
- const minimaxModels = requestedModels.filter((model) => model.startsWith('MiniMax-'))
280
- if (requestedModels.includes(OPENCLAW_DEFAULT_MINIMAX_MODEL) && !minimaxModels.includes(OPENCLAW_DEFAULT_MINIMAX_MODEL)) {
281
- minimaxModels.unshift(OPENCLAW_DEFAULT_MINIMAX_MODEL)
282
- }
283
-
284
- const openaiProviderName = buildProviderName(baseUrlOpenAI, 'custom-openai')
285
- const anthropicProviderName = buildProviderName(baseUrlAnthropic, 'custom-anthropic')
286
- const minimaxProviderName = buildProviderName(`${baseUrlAnthropic.replace(/\/+$/, '')}/minimax`, 'custom-minimax')
287
-
288
- const providers = {
289
- [openaiProviderName]: {
290
- baseUrl: baseUrlOpenAI,
291
- apiKey,
292
- api: 'openai-completions',
293
- models: openaiModels.map(buildModelEntry),
294
- },
295
- [anthropicProviderName]: {
296
- baseUrl: baseUrlAnthropic,
297
- apiKey,
298
- api: 'anthropic-messages',
299
- models: claudeModels.map(buildModelEntry),
300
- },
301
- }
302
-
303
- if (minimaxModels.length > 0) {
304
- providers[minimaxProviderName] = {
305
- baseUrl: `${baseUrlAnthropic.replace(/\/+$/, '')}/minimax`,
306
- apiKey,
307
- api: 'anthropic-messages',
308
- models: minimaxModels.map(buildModelEntry),
309
- }
310
- }
317
+ if (!requestedModels.includes(OPENCLAW_DEFAULT_MODEL)) requestedModels.unshift(OPENCLAW_DEFAULT_MODEL)
318
+ return Array.from(new Set(requestedModels))
319
+ }
311
320
 
312
- const managedModelRefs = [
313
- ...openaiModels.map((id) => `${openaiProviderName}/${id}`),
314
- ...claudeModels.map((id) => `${anthropicProviderName}/${id}`),
315
- ...minimaxModels.map((id) => `${minimaxProviderName}/${id}`),
316
- ]
321
+ function buildManagedPlan(baseUrlBridge, primaryModel, selectedModels) {
322
+ const requestedModels = normalizeRequestedModels(selectedModels)
323
+ const managedModelRefs = requestedModels.map((model) => `${OPENCLAW_PROVIDER_NAME}/${model}`)
324
+ const fallbackPrimaryModel = pickPrimaryModel(primaryModel, requestedModels)
325
+ const primaryRef = managedModelRefs.includes(`${OPENCLAW_PROVIDER_NAME}/${fallbackPrimaryModel}`)
326
+ ? `${OPENCLAW_PROVIDER_NAME}/${fallbackPrimaryModel}`
327
+ : managedModelRefs[0] || `${OPENCLAW_PROVIDER_NAME}/${OPENCLAW_DEFAULT_MODEL}`
317
328
 
318
329
  return {
319
- providers,
330
+ providers: {
331
+ [OPENCLAW_PROVIDER_NAME]: {
332
+ baseUrl: baseUrlBridge,
333
+ api: 'openai-completions',
334
+ models: requestedModels.map(buildModelEntry),
335
+ },
336
+ },
320
337
  managedModelRefs,
321
- primaryRef: `${openaiProviderName}/${OPENCLAW_DEFAULT_MODEL}`,
322
- minimaxRef: minimaxModels[0] ? `${minimaxProviderName}/${minimaxModels[0]}` : '',
338
+ models: requestedModels,
339
+ primaryRef,
323
340
  }
324
341
  }
325
342
 
326
343
  function isHolySheepProvider(provider) {
327
- return typeof provider?.baseUrl === 'string' && provider.baseUrl.includes('api.holysheep.ai')
344
+ return typeof provider?.baseUrl === 'string' && (
345
+ provider.baseUrl.includes('api.holysheep.ai') ||
346
+ provider.baseUrl.includes('127.0.0.1')
347
+ )
328
348
  }
329
349
 
330
- function writeManagedConfig(baseConfig, apiKey, baseUrlAnthropic, baseUrlOpenAI, selectedModels, gatewayPort) {
350
+ function writeManagedConfig(baseConfig, bridgeBaseUrl, primaryModel, selectedModels, gatewayPort) {
331
351
  fs.mkdirSync(OPENCLAW_DIR, { recursive: true })
332
352
 
333
- const plan = buildManagedPlan(apiKey, baseUrlAnthropic, baseUrlOpenAI, selectedModels)
353
+ const plan = buildManagedPlan(bridgeBaseUrl, primaryModel, selectedModels)
334
354
  const existingProviders = baseConfig?.models?.providers || {}
335
355
  const managedProviderIds = Object.entries(existingProviders)
336
- .filter(([, provider]) => isHolySheepProvider(provider))
356
+ .filter(([providerId, provider]) => providerId === OPENCLAW_PROVIDER_NAME || isHolySheepProvider(provider))
337
357
  .map(([providerId]) => providerId)
338
358
 
339
359
  const preservedProviders = Object.fromEntries(
@@ -452,7 +472,7 @@ module.exports = {
452
472
  id: 'openclaw',
453
473
 
454
474
  checkInstalled() {
455
- return hasOpenClawBinary()
475
+ return detectRuntime().available
456
476
  },
457
477
 
458
478
  detectRuntime,
@@ -462,11 +482,13 @@ module.exports = {
462
482
  },
463
483
 
464
484
  isConfigured() {
465
- const cfg = JSON.stringify(readConfig())
466
- return cfg.includes('holysheep.ai')
485
+ const cfg = readConfig()
486
+ const hasProvider = cfg?.models?.providers?.[OPENCLAW_PROVIDER_NAME]?.baseUrl?.includes('127.0.0.1')
487
+ const bridge = readBridgeConfig()
488
+ return Boolean(hasProvider && bridge?.apiKey)
467
489
  },
468
490
 
469
- configure(apiKey, baseUrlAnthropic, baseUrlOpenAI, _primaryModel, selectedModels) {
491
+ configure(apiKey, baseUrlAnthropic, baseUrlOpenAI, primaryModel, selectedModels) {
470
492
  const chalk = require('chalk')
471
493
  console.log(chalk.gray('\n ⚙️ 正在配置 OpenClaw...'))
472
494
 
@@ -476,6 +498,27 @@ module.exports = {
476
498
  }
477
499
  this._lastRuntimeCommand = runtime.command
478
500
 
501
+ const resolvedPrimaryModel = pickPrimaryModel(primaryModel, selectedModels)
502
+ const bridgePort = findAvailableGatewayPort(DEFAULT_BRIDGE_PORT)
503
+ if (!bridgePort) {
504
+ throw new Error(`找不到可用桥接端口(已检查 ${DEFAULT_BRIDGE_PORT}-${DEFAULT_BRIDGE_PORT + MAX_PORT_SCAN - 1})`)
505
+ }
506
+ this._lastBridgePort = bridgePort
507
+
508
+ writeBridgeConfig({
509
+ port: bridgePort,
510
+ apiKey,
511
+ baseUrlAnthropic,
512
+ baseUrlOpenAI,
513
+ models: normalizeRequestedModels(selectedModels),
514
+ })
515
+
516
+ console.log(chalk.gray(' → 正在启动 HolySheep Bridge...'))
517
+ if (!startBridge(bridgePort)) {
518
+ throw new Error('HolySheep OpenClaw Bridge 启动失败')
519
+ }
520
+ const bridgeBaseUrl = getBridgeBaseUrl(bridgePort)
521
+
479
522
  runOpenClaw(['gateway', 'stop'], { preferNpx: runtime.via === 'npx' })
480
523
 
481
524
  const gatewayPort = findAvailableGatewayPort(DEFAULT_GATEWAY_PORT)
@@ -504,9 +547,9 @@ module.exports = {
504
547
  '--non-interactive',
505
548
  '--accept-risk',
506
549
  '--auth-choice', 'custom-api-key',
507
- '--custom-base-url', baseUrlOpenAI,
550
+ '--custom-base-url', bridgeBaseUrl,
508
551
  '--custom-api-key', apiKey,
509
- '--custom-model-id', OPENCLAW_DEFAULT_MODEL,
552
+ '--custom-model-id', resolvedPrimaryModel,
510
553
  '--custom-compatibility', 'openai',
511
554
  '--gateway-port', String(gatewayPort),
512
555
  '--install-daemon',
@@ -518,18 +561,12 @@ module.exports = {
518
561
 
519
562
  const plan = writeManagedConfig(
520
563
  result.status === 0 ? readConfig() : {},
521
- apiKey,
522
- baseUrlAnthropic,
523
- baseUrlOpenAI,
564
+ bridgeBaseUrl,
565
+ resolvedPrimaryModel,
524
566
  selectedModels,
525
567
  gatewayPort,
526
568
  )
527
569
 
528
- const routingRegressionWarning = getRoutingRegressionWarning(runtime.version, plan.minimaxRef)
529
- if (routingRegressionWarning) {
530
- console.log(chalk.yellow(` ⚠️ ${routingRegressionWarning}`))
531
- }
532
-
533
570
  _disableGatewayAuth(runtime.via === 'npx')
534
571
  const serviceReady = _installGatewayService(gatewayPort, runtime.via === 'npx')
535
572
 
@@ -545,7 +582,8 @@ module.exports = {
545
582
  const dashUrl = getDashboardUrl(gatewayPort, runtime.via === 'npx')
546
583
  console.log(chalk.cyan('\n → 浏览器打开(推荐使用此地址):'))
547
584
  console.log(chalk.bold.cyan(` ${dashUrl}`))
548
- console.log(chalk.gray(` 默认模型: ${OPENCLAW_DEFAULT_MODEL}`))
585
+ console.log(chalk.gray(` Bridge 地址: ${bridgeBaseUrl}`))
586
+ console.log(chalk.gray(` 默认模型: ${plan.primaryRef || OPENCLAW_DEFAULT_MODEL}`))
549
587
  console.log(chalk.gray(' 如在 Windows 上打开裸 http://127.0.0.1:PORT/ 仍报 Unauthorized,请使用上面的 dashboard 地址'))
550
588
 
551
589
  return {
@@ -559,24 +597,28 @@ module.exports = {
559
597
 
560
598
  reset() {
561
599
  try { fs.unlinkSync(CONFIG_FILE) } catch {}
600
+ try { fs.unlinkSync(BRIDGE_CONFIG_FILE) } catch {}
562
601
  },
563
602
 
564
603
  getConfigPath() { return CONFIG_FILE },
604
+ getBridgePort() { return getConfiguredBridgePort() },
565
605
  getGatewayPort() { return getConfiguredGatewayPort() },
566
606
  getPrimaryModel() { return getConfiguredPrimaryModel() },
567
607
  getPortListeners(port = getConfiguredGatewayPort()) { return listPortListeners(port) },
568
608
  get hint() {
569
- return `Gateway 已启动,默认模型为 ${getConfiguredPrimaryModel() || OPENCLAW_DEFAULT_MODEL}`
609
+ return `Bridge + Gateway 已配置,默认模型为 ${getConfiguredPrimaryModel() || OPENCLAW_DEFAULT_MODEL}`
570
610
  },
571
611
  get launchSteps() {
612
+ const bridgePort = getConfiguredBridgePort()
572
613
  const port = getConfiguredGatewayPort()
573
614
  return [
574
- { cmd: getLaunchCommand(port), note: '先启动 OpenClaw Gateway' },
615
+ { cmd: getBridgeCommand(bridgePort), note: '先启动 HolySheep OpenClaw Bridge' },
616
+ { cmd: getLaunchCommand(port), note: '再启动 OpenClaw Gateway' },
575
617
  { cmd: getDashboardCommand(), note: '再生成/打开可直接连接的 Dashboard 地址(推荐)' },
576
618
  ]
577
619
  },
578
620
  get launchNote() {
579
- return `🌐 推荐运行 ${getDashboardCommand()};Windows 上不要只打开裸 http://127.0.0.1:${getConfiguredGatewayPort()}/`
621
+ return `🌐 请先启动 Bridge,再启动 Gateway;最后运行 ${getDashboardCommand()}`
580
622
  },
581
623
  installCmd: 'npm install -g openclaw@latest',
582
624
  docsUrl: 'https://docs.openclaw.ai',