@simonyea/holysheep-cli 2.1.40 → 2.1.41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/configure-worker.js +4491 -0
- package/dist/index.js +9591 -0
- package/dist/process-proxy-inject.js +117 -0
- package/package.json +20 -7
- package/.gitea/workflows/sanity.yml +0 -125
- package/scripts/check-tarball-size.js +0 -44
- package/src/commands/balance.js +0 -57
- package/src/commands/claude-proxy.js +0 -248
- package/src/commands/claude.js +0 -135
- package/src/commands/doctor.js +0 -282
- package/src/commands/login.js +0 -211
- package/src/commands/openclaw.js +0 -258
- package/src/commands/reset.js +0 -53
- package/src/commands/setup.js +0 -493
- package/src/commands/upgrade.js +0 -168
- package/src/commands/webui.js +0 -622
- package/src/index.js +0 -226
- package/src/tools/aider.js +0 -78
- package/src/tools/antigravity.js +0 -42
- package/src/tools/claude-code.js +0 -228
- package/src/tools/claude-process-proxy.js +0 -1030
- package/src/tools/codex.js +0 -254
- package/src/tools/continue.js +0 -146
- package/src/tools/cursor.js +0 -71
- package/src/tools/droid.js +0 -281
- package/src/tools/env-config.js +0 -185
- package/src/tools/gemini-cli.js +0 -82
- package/src/tools/hermes.js +0 -354
- package/src/tools/index.js +0 -13
- package/src/tools/openclaw-bridge.js +0 -987
- package/src/tools/openclaw.js +0 -925
- package/src/tools/opencode.js +0 -227
- package/src/tools/process-proxy-inject.js +0 -142
- package/src/utils/config.js +0 -54
- package/src/utils/shell.js +0 -342
- package/src/utils/which.js +0 -176
- package/src/webui/aionui-runtime-fetcher.js +0 -429
- package/src/webui/aionui-runtime.js +0 -139
- package/src/webui/aionui-wrapper.js +0 -734
- package/src/webui/configure-worker.js +0 -67
- package/src/webui/server.js +0 -1572
- package/src/webui/workspace-runtime.js +0 -288
- package/src/webui/workspace-store.js +0 -325
- /package/{src/webui → dist}/index.html +0 -0
- /package/{src/tools → dist}/pty-hermes-wrapper.py +0 -0
|
@@ -1,987 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
'use strict'
|
|
3
|
-
|
|
4
|
-
const fs = require('fs')
|
|
5
|
-
const http = require('http')
|
|
6
|
-
const path = require('path')
|
|
7
|
-
const os = require('os')
|
|
8
|
-
const fetch = global.fetch || require('node-fetch')
|
|
9
|
-
const _nodeFetch = require('node-fetch')
|
|
10
|
-
|
|
11
|
-
// On Windows, api.holysheep.ai publishes IPv6 DNS records but Windows Server
// commonly has IPv6 disabled. Node.js prefers IPv6, so each request would stall
// on the IPv6 connect timeout before falling back to IPv4 — exceeding the
// OpenClaw embedded agent's timeout and triggering "LLM request timed out".
// Workaround: on win32, force node-fetch with an IPv4-only https.Agent.
function upstreamFetch(url, options) {
  const needsIpv4Workaround =
    process.platform === 'win32' && String(url).startsWith('https://')
  if (!needsIpv4Workaround) return fetch(url, options)
  const https = require('https')
  const ipv4OnlyAgent = new https.Agent({ family: 4 })
  return _nodeFetch(url, { ...options, agent: ipv4OnlyAgent })
}
|
|
22
|
-
|
|
23
|
-
// Bridge state lives alongside OpenClaw's own files under ~/.openclaw.
const OPENCLAW_DIR = path.join(os.homedir(), '.openclaw')
const BRIDGE_CONFIG_FILE = path.join(OPENCLAW_DIR, 'holysheep-bridge.json')
// Watchdog tuning defaults (consumers not visible in this chunk —
// NOTE(review): names suggest a periodic health-check loop; confirm at usage site).
const DEFAULT_WATCHDOG_INTERVAL_MS = 3000
const DEFAULT_WATCHDOG_FAILURE_THRESHOLD = 3
const DEFAULT_WATCHDOG_STARTUP_GRACE_MS = 30000
const DEFAULT_WATCHDOG_REQUEST_TIMEOUT_MS = 1500
|
|
29
|
-
|
|
30
|
-
// Load and parse the bridge's JSON config file. Throws if the file is
// missing or contains invalid JSON — callers are expected to handle that.
function readBridgeConfig(configPath = BRIDGE_CONFIG_FILE) {
  const raw = fs.readFileSync(configPath, 'utf8')
  return JSON.parse(raw)
}
|
|
33
|
-
|
|
34
|
-
// Parse CLI flags into { port, host, config }.
// Fix over the original: `--port` with a missing or non-numeric value used to
// produce `port: NaN` (Number(undefined)), and a trailing `--host`/`--config`
// would overwrite the default with `undefined`. Invalid or missing values now
// leave the defaults untouched.
function parseArgs(argv) {
  const args = { port: null, host: '127.0.0.1', config: BRIDGE_CONFIG_FILE }
  for (let i = 0; i < argv.length; i++) {
    const flag = argv[i]
    if (flag === '--port') {
      const port = Number(argv[++i])
      // Keep the default (null) rather than storing NaN for bad input.
      if (Number.isFinite(port)) args.port = port
    } else if (flag === '--host') {
      if (argv[i + 1] != null) args.host = argv[++i]
    } else if (flag === '--config') {
      if (argv[i + 1] != null) args.config = argv[++i]
    }
  }
  return args
}
|
|
44
|
-
|
|
45
|
-
// Buffer an incoming request body and resolve with the parsed JSON object.
// Resolves {} for an empty body; rejects on invalid JSON, stream errors, or
// a body larger than 5 MiB (the request is destroyed in that case).
function readJsonBody(req) {
  const MAX_BODY_BYTES = 5 * 1024 * 1024
  return new Promise((resolve, reject) => {
    let buffered = ''

    const onData = (chunk) => {
      buffered += chunk
      if (buffered.length > MAX_BODY_BYTES) {
        reject(new Error('Request body too large'))
        req.destroy()
      }
    }

    const onEnd = () => {
      if (!buffered) {
        resolve({})
        return
      }
      try {
        resolve(JSON.parse(buffered))
      } catch (error) {
        reject(error)
      }
    }

    req.on('data', onData)
    req.on('end', onEnd)
    req.on('error', reject)
  })
}
|
|
66
|
-
|
|
67
|
-
// Serialize `payload` as JSON and finish the response with the given status.
// Always marks the response non-cacheable.
function sendJson(res, statusCode, payload) {
  const headers = {
    'content-type': 'application/json; charset=utf-8',
    'cache-control': 'no-store',
  }
  res.writeHead(statusCode, headers)
  res.end(JSON.stringify(payload))
}
|
|
74
|
-
|
|
75
|
-
// Replay a fully-materialized chat.completion as a minimal two-chunk
// OpenAI SSE stream (role/content+tool_calls chunk, then a finish chunk
// carrying usage), terminated by "data: [DONE]".
function sendOpenAIStream(res, payload) {
  const firstChoice = payload.choices?.[0] || {}
  const assistantMessage = firstChoice.message || {}
  const createdAt = payload.created || Math.floor(Date.now() / 1000)
  const textContent = extractOpenAITextContent(assistantMessage.content)

  res.writeHead(200, {
    'content-type': 'text/event-stream; charset=utf-8',
    'cache-control': 'no-cache, no-transform',
    connection: 'keep-alive',
  })

  const delta = { role: 'assistant' }
  if (textContent) delta.content = textContent
  if (assistantMessage.tool_calls) delta.tool_calls = assistantMessage.tool_calls

  const openingChunk = {
    id: payload.id,
    object: 'chat.completion.chunk',
    created: createdAt,
    model: payload.model,
    choices: [{ index: 0, delta, finish_reason: null }],
  }

  const closingChunk = {
    id: payload.id,
    object: 'chat.completion.chunk',
    created: createdAt,
    model: payload.model,
    choices: [{ index: 0, delta: {}, finish_reason: firstChoice.finish_reason || 'stop' }],
    usage: payload.usage,
  }

  for (const chunk of [openingChunk, closingChunk]) {
    res.write(`data: ${JSON.stringify(chunk)}\n\n`)
  }
  res.end('data: [DONE]\n\n')
}
|
|
116
|
-
|
|
117
|
-
// Coerce an arbitrary content value to a plain string: strings pass through,
// arrays are normalized element-wise and joined with newlines (empty parts
// dropped), objects yield their first string-valued well-known field, and
// anything else is stringified (null/undefined become '').
function normalizeText(value) {
  if (typeof value === 'string') return value

  if (Array.isArray(value)) {
    const parts = value.map(normalizeText).filter(Boolean)
    return parts.join('\n')
  }

  if (value && typeof value === 'object') {
    for (const key of ['text', 'output_text', 'content', 'value']) {
      if (typeof value[key] === 'string') return value[key]
    }
  }

  if (value == null) return ''
  return String(value)
}
|
|
128
|
-
|
|
129
|
-
// Flatten OpenAI-style message content (string or array of parts) into a
// single string. Text-typed parts use their `text` field; other object parts
// fall back to text/content/value. Empty pieces are dropped.
function extractOpenAITextContent(content) {
  if (typeof content === 'string') return content
  if (!Array.isArray(content)) return normalizeText(content)

  const pieces = []
  for (const part of content) {
    let piece = ''
    if (typeof part === 'string') {
      piece = part
    } else if (part && typeof part === 'object') {
      const isTextPart =
        part.type === 'text' || part.type === 'output_text' || part.type === 'input_text'
      piece = isTextPart
        ? normalizeText(part.text)
        : normalizeText(part.text || part.content || part.value)
    }
    if (piece) pieces.push(piece)
  }
  return pieces.join('')
}
|
|
145
|
-
|
|
146
|
-
// Split a base64 data: URL into its media type and payload.
// Returns null for anything that is not a base64 data URL.
function parseDataUrl(url) {
  const DATA_URL_RE = /^data:([^;]+);base64,(.+)$/
  const match = DATA_URL_RE.exec(String(url || ''))
  if (match === null) return null
  const [, mediaType, data] = match
  return { mediaType, data }
}
|
|
151
|
-
|
|
152
|
-
// Convert OpenAI message content to Anthropic content blocks. Plain strings
// become one text block; arrays keep text parts and base64 image_url parts
// (non-base64 image URLs are silently dropped); anything else yields [].
function openAIContentToAnthropicBlocks(content) {
  if (typeof content === 'string') return [{ type: 'text', text: content }]
  if (!Array.isArray(content)) return []

  const blocks = []
  for (const part of content) {
    if (!part) continue

    if (part.type === 'text' && typeof part.text === 'string') {
      blocks.push({ type: 'text', text: part.text })
    } else if (part.type === 'image_url' && part.image_url?.url) {
      const parsed = parseDataUrl(part.image_url.url)
      if (parsed) {
        blocks.push({
          type: 'image',
          source: { type: 'base64', media_type: parsed.mediaType, data: parsed.data },
        })
      }
    }
  }
  return blocks
}
|
|
175
|
-
|
|
176
|
-
// Append content blocks to the message list, merging into the previous
// message when it has the same role (Anthropic requires alternating roles).
// No-op for an empty block list.
function pushAnthropicMessage(messages, role, blocks) {
  if (blocks.length === 0) return
  const last = messages[messages.length - 1]
  if (last?.role === role) {
    last.content = [...last.content, ...blocks]
  } else {
    messages.push({ role, content: blocks })
  }
}
|
|
185
|
-
|
|
186
|
-
// Translate an OpenAI chat `messages` array into Anthropic form:
// - system messages are collected into one system string (text parts only);
// - tool messages become user-side tool_result blocks;
// - assistant messages carry text blocks plus tool_use blocks derived from
//   tool_calls (arguments parsed best-effort, defaulting to {});
// - everything else becomes user text/image blocks.
// Adjacent same-role messages are merged via pushAnthropicMessage.
// Returns { system, messages } with system undefined when empty.
function convertOpenAIToAnthropicMessages(messages) {
  const converted = []
  const systemTexts = []

  for (const msg of messages || []) {
    if (!msg) continue

    switch (msg.role) {
      case 'system': {
        const blocks = openAIContentToAnthropicBlocks(msg.content)
        if (blocks.length === 0) {
          const text = normalizeText(msg.content)
          if (text) systemTexts.push(text)
        } else {
          for (const block of blocks) {
            if (block.type === 'text') systemTexts.push(block.text)
          }
        }
        break
      }

      case 'tool': {
        const resultBlock = {
          type: 'tool_result',
          tool_use_id: msg.tool_call_id,
          content: normalizeText(msg.content),
        }
        pushAnthropicMessage(converted, 'user', [resultBlock])
        break
      }

      case 'assistant': {
        const blocks = []
        const textBlocks = openAIContentToAnthropicBlocks(msg.content)
        if (textBlocks.length) {
          blocks.push(...textBlocks)
        } else if (typeof msg.content === 'string' && msg.content) {
          blocks.push({ type: 'text', text: msg.content })
        }

        for (const call of msg.tool_calls || []) {
          let input = {}
          try {
            input = JSON.parse(call.function?.arguments || '{}')
          } catch {}
          blocks.push({
            type: 'tool_use',
            id: call.id,
            name: call.function?.name || 'tool',
            input,
          })
        }

        pushAnthropicMessage(converted, 'assistant', blocks)
        break
      }

      default: {
        const blocks = openAIContentToAnthropicBlocks(msg.content)
        if (blocks.length) {
          pushAnthropicMessage(converted, 'user', blocks)
        } else {
          const text = normalizeText(msg.content)
          if (text) pushAnthropicMessage(converted, 'user', [{ type: 'text', text }])
        }
      }
    }
  }

  return {
    system: systemTexts.join('\n\n').trim() || undefined,
    messages: converted,
  }
}
|
|
251
|
-
|
|
252
|
-
// Map OpenAI function-style tool definitions to Anthropic tool definitions.
// Entries that are not named function tools are skipped.
function convertOpenAIToolsToAnthropic(tools) {
  const result = []
  for (const tool of tools || []) {
    if (tool?.type !== 'function' || !tool.function?.name) continue
    result.push({
      name: tool.function.name,
      description: tool.function.description || '',
      input_schema: tool.function.parameters || { type: 'object', properties: {} },
    })
  }
  return result
}
|
|
261
|
-
|
|
262
|
-
// Map an OpenAI tool_choice to Anthropic's tool_choice shape.
// 'none' has no direct Anthropic equivalent; it maps to auto with parallel
// tool use disabled. Unknown shapes fall back to auto.
function convertToolChoice(toolChoice) {
  if (!toolChoice || toolChoice === 'auto') return { type: 'auto' }
  if (toolChoice === 'none') return { type: 'auto', disable_parallel_tool_use: true }
  if (toolChoice === 'required') return { type: 'any' }

  const forcedName = toolChoice.type === 'function' ? toolChoice.function?.name : undefined
  return forcedName ? { type: 'tool', name: forcedName } : { type: 'auto' }
}
|
|
271
|
-
|
|
272
|
-
// Build an Anthropic /v1/messages payload from an OpenAI chat.completions
// request body. max_tokens falls back through the OpenAI aliases to 4096;
// optional sampling fields, stop sequences, tools, and tool_choice are
// forwarded only when present.
function buildAnthropicPayload(requestBody, stream = false) {
  const { system, messages } = convertOpenAIToAnthropicMessages(requestBody.messages)
  const maxTokens =
    requestBody.max_tokens ||
    requestBody.max_completion_tokens ||
    requestBody.max_output_tokens ||
    4096

  const payload = {
    model: requestBody.model,
    max_tokens: maxTokens,
    messages,
    stream: Boolean(stream),
  }

  if (system) payload.system = system
  if (requestBody.temperature != null) payload.temperature = requestBody.temperature
  if (requestBody.top_p != null) payload.top_p = requestBody.top_p

  const stop = requestBody.stop
  if (typeof stop === 'string') payload.stop_sequences = [stop]
  else if (Array.isArray(stop) && stop.length) payload.stop_sequences = stop

  const tools = convertOpenAIToolsToAnthropic(requestBody.tools)
  if (tools.length) payload.tools = tools
  if (requestBody.tool_choice) payload.tool_choice = convertToolChoice(requestBody.tool_choice)

  return payload
}
|
|
293
|
-
|
|
294
|
-
// Translate an Anthropic stop_reason to an OpenAI finish_reason.
// Anything unrecognized (including end_turn/stop_sequence) maps to 'stop'.
function mapFinishReason(stopReason) {
  switch (stopReason) {
    case 'tool_use':
      return 'tool_calls'
    case 'max_tokens':
      return 'length'
    default:
      return 'stop'
  }
}
|
|
299
|
-
|
|
300
|
-
// Extract Anthropic tool_use blocks from a content array and shape them as
// OpenAI tool_calls (arguments JSON-encoded; missing input becomes '{}').
function buildToolCalls(content) {
  return (content || [])
    .filter((block) => block?.type === 'tool_use')
    .map((block) => ({
      id: block.id,
      type: 'function',
      function: {
        name: block.name,
        arguments: JSON.stringify(block.input || {}),
      },
    }))
}
|
|
315
|
-
|
|
316
|
-
// Convert a non-streaming Anthropic /v1/messages response into an OpenAI
// chat.completion object. Text blocks are concatenated; tool_use blocks
// become tool_calls; usage token counts are remapped when present.
function anthropicToOpenAIResponse(responseBody, requestedModel) {
  const textParts = []
  for (const block of responseBody.content || []) {
    if (block?.type === 'text') textParts.push(block.text)
  }
  const text = textParts.join('')
  const toolCalls = buildToolCalls(responseBody.content)

  const message = { role: 'assistant', content: text || null }
  if (toolCalls.length) message.tool_calls = toolCalls

  let usage
  if (responseBody.usage) {
    const promptTokens = responseBody.usage.input_tokens || 0
    const completionTokens = responseBody.usage.output_tokens || 0
    usage = {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
    }
  }

  return {
    id: responseBody.id || `chatcmpl_${Date.now()}`,
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000),
    model: requestedModel,
    choices: [{
      index: 0,
      message,
      finish_reason: mapFinishReason(responseBody.stop_reason),
    }],
    usage,
  }
}
|
|
346
|
-
|
|
347
|
-
// Choose the upstream protocol family from the model name prefix.
// Unrecognized models (including gpt-*) take the OpenAI route.
function pickRoute(model) {
  const name = String(model)
  if (name.startsWith('claude-')) return 'anthropic'
  if (name.startsWith('MiniMax-')) return 'minimax'
  return 'openai'
}
|
|
353
|
-
|
|
354
|
-
// Concatenate the text of a Responses-API `output` array: message items
// contribute their content parts, other items with `content` contribute it
// as-is, and only output_text/text parts are kept.
function responseOutputToText(output) {
  let parts = []
  for (const item of output || []) {
    if (item?.type === 'message') {
      // concat (not spread) so a non-array `content` is appended whole,
      // matching the original flatMap's one-level flattening.
      parts = parts.concat(item.content || [])
    } else if (item?.content) {
      parts = parts.concat(item.content)
    }
  }

  const texts = []
  for (const part of parts) {
    if (part?.type !== 'output_text' && part?.type !== 'text') continue
    const text = extractOpenAITextContent(part.text || part.content || part)
    if (text) texts.push(text)
  }
  return texts.join('')
}
|
|
366
|
-
|
|
367
|
-
// Extract function_call items from a Responses-API `output` array and shape
// them as OpenAI tool_calls. IDs fall back to call_id, then id, then a
// 1-based synthetic `call_N` counted over matched items only.
function responseOutputToToolCalls(output) {
  const calls = []
  for (const item of output || []) {
    if (item?.type !== 'function_call' || !item.name) continue
    const args = typeof item.arguments === 'string'
      ? item.arguments
      : JSON.stringify(item.arguments || {})
    calls.push({
      id: item.call_id || item.id || `call_${calls.length + 1}`,
      type: 'function',
      function: { name: item.name, arguments: args },
    })
  }
  return calls
}
|
|
381
|
-
|
|
382
|
-
// Convert an OpenAI Responses-API result (possibly wrapped in a `response`
// envelope) into a chat.completion object. A 'completed' or absent status
// maps to finish_reason 'stop'; anything else is treated as 'length'.
function responseToChatCompletion(responseBody, requestedModel) {
  const envelope = responseBody?.response
  const response = envelope && typeof envelope === 'object' ? envelope : responseBody

  const text = responseOutputToText(response.output)
  const toolCalls = responseOutputToToolCalls(response.output)

  const status = String(response.status || '').toLowerCase()
  const finishReason = status === 'completed' || status === '' ? 'stop' : 'length'

  const completionTokens = response.usage?.output_tokens || response.usage?.completion_tokens || 0
  const promptTokens = response.usage?.input_tokens || response.usage?.prompt_tokens || 0

  const message = { role: 'assistant', content: text || null }
  if (toolCalls.length) message.tool_calls = toolCalls

  let usage
  if (response.usage) {
    usage = {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: response.usage.total_tokens || (promptTokens + completionTokens),
    }
  }

  return {
    id: response.id || `chatcmpl_${Date.now()}`,
    object: 'chat.completion',
    created: response.created_at || Math.floor(Date.now() / 1000),
    model: requestedModel || response.model,
    choices: [{
      index: 0,
      message,
      finish_reason: finishReason,
    }],
    usage,
  }
}
|
|
417
|
-
|
|
418
|
-
// Normalize any OpenAI-compatible upstream body to chat.completion shape:
// Responses-API bodies are converted outright; chat.completion(.chunk)
// bodies get their first choice's message/delta content flattened to a
// string in place. Non-objects pass through untouched.
function normalizeOpenAICompatibleResponse(parsed, requestedModel) {
  if (!parsed || typeof parsed !== 'object') return parsed

  const isResponsesApi = parsed.object === 'response' || Array.isArray(parsed.output)
  if (isResponsesApi) return responseToChatCompletion(parsed, requestedModel)

  const isChatShape =
    parsed.object === 'chat.completion' || parsed.object === 'chat.completion.chunk'
  if (!isChatShape) return parsed

  const choice = parsed.choices?.[0]
  if (choice?.message) {
    const flattened = extractOpenAITextContent(choice.message.content) || null
    choice.message = { ...choice.message, content: flattened }
  }
  if (choice?.delta?.content != null) {
    choice.delta = { ...choice.delta, content: extractOpenAITextContent(choice.delta.content) }
  }
  return parsed
}
|
|
443
|
-
|
|
444
|
-
// Reduce an upstream response body to one chat.completion object. The body
// may be (a) a single JSON document, (b) an SSE stream of OpenAI
// chat.completion.chunk events, or (c) an SSE stream of Responses-API events
// (response.output_text.delta / response.completed / ...). Returns null when
// no usable chunk is found.
function parseOpenAIStreamText(text, requestedModel) {
  // Fast path: the whole body is one JSON object (non-streaming response).
  try {
    const parsed = JSON.parse(String(text || ''))
    if (parsed && typeof parsed === 'object') {
      return normalizeOpenAICompatibleResponse(parsed, requestedModel)
    }
  } catch {}

  // SSE path: split into blank-line-separated event blocks.
  const blocks = String(text || '').split(/\r?\n\r?\n+/).filter(Boolean)
  let responseCompleted = null
  let finalChunk = null
  let content = ''
  let sawOutputTextDelta = false

  for (const block of blocks) {
    const eventMatch = block.match(/^event:\s*(.+)$/m)
    const dataMatch = block.match(/^data:\s*(.+)$/m)
    if (!dataMatch) continue

    const eventName = eventMatch ? eventMatch[1].trim() : ''
    const payload = dataMatch[1].trim()
    if (!payload || payload === '[DONE]') continue

    let chunk
    try {
      chunk = JSON.parse(payload)
    } catch {
      continue
    }

    // Responses-API incremental text.
    if (eventName === 'response.output_text.delta' && typeof chunk.delta === 'string') {
      sawOutputTextDelta = true
      content += chunk.delta
      continue
    }

    // Responses-API completed text part — only used when no deltas were seen,
    // to avoid double-counting text delivered incrementally above.
    if (eventName === 'response.content_part.done' && chunk.part?.type === 'output_text' && typeof chunk.part.text === 'string') {
      if (!sawOutputTextDelta) content += chunk.part.text
      continue
    }

    // Responses-API terminal event carries the authoritative final response.
    if (eventName === 'response.completed' && chunk.response) {
      responseCompleted = chunk.response
      if (!content) {
        const outputText = responseOutputToText(chunk.response.output)
        if (outputText) content = outputText
      }
      continue
    }

    // Otherwise assume OpenAI chat.completion.chunk format: accumulate
    // delta/message content and remember the last chunk for id/model/usage.
    finalChunk = chunk
    const choice = chunk.choices?.[0] || {}
    const delta = choice.delta || {}
    const deltaContent = extractOpenAITextContent(delta.content)
    const messageContent = extractOpenAITextContent(choice.message?.content)
    if (deltaContent) content += deltaContent
    else if (messageContent) content += messageContent
  }

  if (responseCompleted) {
    const completion = responseToChatCompletion(responseCompleted, requestedModel || responseCompleted.model)
    // Backfill text collected from deltas if the final response carried none.
    if (!completion.choices?.[0]?.message?.content && content) {
      completion.choices[0].message.content = content
    }
    return completion
  }

  if (!finalChunk) return null

  // Synthesize a chat.completion from accumulated chunk data.
  return normalizeOpenAICompatibleResponse({
    id: finalChunk.id || `chatcmpl_${Date.now()}`,
    object: 'chat.completion',
    created: finalChunk.created || Math.floor(Date.now() / 1000),
    model: finalChunk.model,
    choices: [{
      index: 0,
      message: { role: 'assistant', content: content || null },
      finish_reason: finalChunk.choices?.[0]?.finish_reason || 'stop',
    }],
    usage: finalChunk.usage,
  }, requestedModel)
}
|
|
526
|
-
|
|
527
|
-
// Forward an OpenAI-style chat.completions request to config.baseUrlOpenAI.
// Streaming requests (stream === true) are piped through byte-for-byte;
// non-streaming responses are normalized via parseOpenAIStreamText before
// being sent to the client. Upstream failures are relayed with the upstream
// status, content-type, and raw body.
async function relayOpenAIRequest(requestBody, config, res) {
  const upstreamBody = {
    ...requestBody,
    // Coerce stream to a strict boolean for the upstream payload.
    stream: requestBody.stream === true,
  }
  const upstream = await upstreamFetch(`${config.baseUrlOpenAI.replace(/\/+$/, '')}/chat/completions`, {
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      authorization: `Bearer ${config.apiKey}`,
      'user-agent': 'holysheep-openclaw-bridge/1.0',
    },
    body: JSON.stringify(upstreamBody),
  })

  // Streaming request: raw byte passthrough (already OpenAI SSE format,
  // no conversion needed).
  if (requestBody.stream === true && upstream.ok) {
    res.writeHead(200, {
      'content-type': 'text/event-stream; charset=utf-8',
      'cache-control': 'no-cache, no-transform',
      connection: 'keep-alive',
    })
    // Best-effort pipe; a broken upstream still ends the client response.
    try { await pipeStream(upstream.body, (chunk) => res.write(chunk)) } catch {}
    if (!res.writableEnded) res.end()
    return
  }

  const text = await upstream.text()
  const parsed = parseOpenAIStreamText(text, requestBody.model)
  if (upstream.ok && parsed) {
    // stream may be truthy-but-not-true here; replay as a synthetic stream.
    if (requestBody.stream) return sendOpenAIStream(res, parsed)
    return sendJson(res, upstream.status, parsed)
  }

  // Error or unparseable body: relay upstream status and raw text.
  res.writeHead(upstream.status, {
    'content-type': upstream.headers.get('content-type') || 'application/json; charset=utf-8',
    'cache-control': upstream.headers.get('cache-control') || 'no-store',
  })
  res.end(text)
}
|
|
567
|
-
|
|
568
|
-
// Read a fetch response body that may be either a WHATWG ReadableStream
// (native fetch) or a Node.js readable stream (node-fetch v2), invoking
// onChunk with each decoded text chunk. A null/undefined body is a no-op.
async function pipeStream(body, onChunk) {
  if (body == null) return

  if (typeof body.getReader !== 'function') {
    // node-fetch v2 body: an async-iterable Node.js stream.
    for await (const chunk of body) {
      onChunk(typeof chunk === 'string' ? chunk : chunk.toString())
    }
    return
  }

  // Native fetch body: WHATWG ReadableStream. Stream-decode so multi-byte
  // characters split across chunks survive, and flush the decoder at EOF.
  const reader = body.getReader()
  const decoder = new TextDecoder()
  try {
    let { done, value } = await reader.read()
    while (!done) {
      onChunk(decoder.decode(value, { stream: true }))
      ;({ done, value } = await reader.read())
    }
    onChunk(decoder.decode())
  } finally {
    reader.releaseLock()
  }
}
|
|
590
|
-
|
|
591
|
-
// Anthropic SSE → OpenAI SSE live passthrough (avoids whole-body buffering,
// which would trip OpenClaw's request timeout).
async function relayAnthropicStream(requestBody, config, route, res) {
  const payload = buildAnthropicPayload(requestBody, true)
  // MiniMax models share the Anthropic wire format behind a different path.
  const baseUrl = route === 'minimax'
    ? `${config.baseUrlAnthropic.replace(/\/+$/, '')}/minimax/v1/messages`
    : `${config.baseUrlAnthropic.replace(/\/+$/, '')}/v1/messages`

  let upstream
  try {
    upstream = await upstreamFetch(baseUrl, {
      method: 'POST',
      headers: {
        'content-type': 'application/json',
        'x-api-key': config.apiKey,
        'anthropic-version': '2023-06-01',
        'user-agent': 'holysheep-openclaw-bridge/1.0',
      },
      body: JSON.stringify(payload),
    })
  } catch (err) {
    return sendJson(res, 500, { error: { message: err.message || 'Bridge upstream error' } })
  }

  // Non-2xx: relay the upstream error as JSON before any SSE headers go out.
  if (!upstream.ok) {
    let errBody
    try { errBody = JSON.parse(await upstream.text()) } catch { errBody = { error: { message: 'Upstream error' } } }
    return sendJson(res, upstream.status, errBody)
  }

  res.writeHead(200, {
    'content-type': 'text/event-stream; charset=utf-8',
    'cache-control': 'no-cache, no-transform',
    connection: 'keep-alive',
  })

  const msgId = `chatcmpl_${Date.now()}`
  const created = Math.floor(Date.now() / 1000)
  const model = requestBody.model
  // Whether the initial role:'assistant' chunk has been emitted yet.
  let headerSent = false
  let inputTokens = 0
  // Streaming tool_use: collected per content block index.
  const toolBlocks = {} // index → {id, name, argsBuf}

  // Emit one OpenAI chat.completion.chunk SSE frame.
  function writeChunk(delta, finishReason, usage) {
    const chunk = {
      id: msgId,
      object: 'chat.completion.chunk',
      created,
      model,
      choices: [{ index: 0, delta, finish_reason: finishReason || null }],
    }
    if (usage) chunk.usage = usage
    res.write(`data: ${JSON.stringify(chunk)}\n\n`)
  }

  // Translate one Anthropic SSE event into OpenAI chunk(s).
  function handleEvent(event, data) {
    if (data === '[DONE]') return
    let obj
    // Malformed event payloads are skipped silently.
    try { obj = JSON.parse(data) } catch { return }

    if (event === 'message_start') {
      inputTokens = obj.message?.usage?.input_tokens || 0
      if (!headerSent) {
        writeChunk({ role: 'assistant', content: '' }, null, null)
        headerSent = true
      }
    } else if (event === 'content_block_start') {
      if (!headerSent) { writeChunk({ role: 'assistant', content: '' }, null, null); headerSent = true }
      const block = obj.content_block || {}
      if (block.type === 'tool_use') {
        toolBlocks[obj.index] = { id: block.id, name: block.name, argsBuf: '' }
        // Announce the tool call with empty arguments; deltas follow.
        writeChunk({
          tool_calls: [{ index: obj.index, id: block.id, type: 'function', function: { name: block.name, arguments: '' } }],
        }, null, null)
      }
    } else if (event === 'content_block_delta') {
      if (!headerSent) { writeChunk({ role: 'assistant', content: '' }, null, null); headerSent = true }
      const delta = obj.delta || {}
      if (delta.type === 'text_delta' && typeof delta.text === 'string') {
        writeChunk({ content: delta.text }, null, null)
      } else if (delta.type === 'input_json_delta' && typeof delta.partial_json === 'string') {
        // Accumulate locally and forward each argument fragment downstream.
        if (toolBlocks[obj.index]) toolBlocks[obj.index].argsBuf += delta.partial_json
        writeChunk({ tool_calls: [{ index: obj.index, function: { arguments: delta.partial_json } }] }, null, null)
      }
    } else if (event === 'message_delta') {
      // Terminal event: emit the finish chunk with mapped reason and usage.
      const stopReason = obj.delta?.stop_reason
      const finishReason = mapFinishReason(stopReason)
      const outputTokens = obj.usage?.output_tokens || 0
      const usage = {
        prompt_tokens: inputTokens,
        completion_tokens: outputTokens,
        total_tokens: inputTokens + outputTokens,
      }
      if (!headerSent) { writeChunk({ role: 'assistant', content: '' }, null, null); headerSent = true }
      writeChunk({}, finishReason, usage)
    }
  }

  // Incremental SSE line parser state: partial trailing line + current event.
  let buf = ''
  let curEvent = ''

  function processBuffer(text) {
    buf += text
    const lines = buf.split('\n')
    // The last element may be an incomplete line; keep it for the next chunk.
    buf = lines.pop() ?? ''
    for (const line of lines) {
      const trimmed = line.trimEnd()
      if (trimmed.startsWith('event:')) {
        curEvent = trimmed.slice(6).trim()
      } else if (trimmed.startsWith('data:')) {
        handleEvent(curEvent, trimmed.slice(5).trim())
        curEvent = ''
      }
    }
  }

  // Best-effort: a broken upstream stream still terminates the client SSE.
  try {
    await pipeStream(upstream.body, processBuffer)
  } catch {}

  if (!res.writableEnded) res.end('data: [DONE]\n\n')
}
|
|
713
|
-
|
|
714
|
-
// Forward an OpenAI-shaped request to an Anthropic-compatible upstream.
// True streaming is delegated to relayAnthropicStream; otherwise the request
// is sent non-streaming, the Anthropic response is converted back to OpenAI
// shape, and (for truthy-but-not-true stream values) replayed as a synthetic
// SSE stream.
async function relayAnthropicRequest(requestBody, config, route, res) {
  if (requestBody.stream === true) {
    return relayAnthropicStream(requestBody, config, route, res)
  }
  const payload = buildAnthropicPayload(requestBody)
  // MiniMax models share the Anthropic wire format behind a different path.
  const baseUrl = route === 'minimax'
    ? `${config.baseUrlAnthropic.replace(/\/+$/, '')}/minimax/v1/messages`
    : `${config.baseUrlAnthropic.replace(/\/+$/, '')}/v1/messages`

  const upstream = await upstreamFetch(baseUrl, {
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      'x-api-key': config.apiKey,
      'anthropic-version': '2023-06-01',
      'user-agent': 'holysheep-openclaw-bridge/1.0',
    },
    body: JSON.stringify(payload),
  })

  const text = await upstream.text()
  let body
  try {
    body = JSON.parse(text)
  } catch {
    // Non-JSON upstream body: wrap the raw text in an error envelope.
    body = { error: { message: text || 'Invalid upstream response' } }
  }

  // Relay upstream errors with their original status code.
  if (!upstream.ok) {
    return sendJson(res, upstream.status, body)
  }

  const openaiBody = anthropicToOpenAIResponse(body, requestBody.model)
  if (requestBody.stream) return sendOpenAIStream(res, openaiBody)
  return sendJson(res, 200, openaiBody)
}
|
|
750
|
-
|
|
751
|
-
// Build a static OpenAI `/v1/models` payload from the bridge config.
// A missing `config.models` is treated as an empty catalog.
function buildModelsResponse(config) {
  const ids = config.models || []
  const data = ids.map((id) => ({ id, object: 'model', owned_by: 'holysheep' }))
  return { object: 'list', data }
}
|
|
761
|
-
|
|
762
|
-
// ── Live HolySheep model list for OpenClaw ───────────────────────────────────
// OpenClaw / AcpModelSelector hits `/v1/models` to populate its model dropdown.
// Historically this returned only the static `config.models` from
// `~/.openclaw/holysheep-bridge.json`, so users saw a stale hand-curated list.
// We now fetch the full live catalog from the HolySheep API once per bridge
// process (60s TTL) and merge it with the user's preferred models. The
// config.models always wins on ordering — new entries from upstream are
// appended afterwards so existing users don't see their default model jump.
//
// Env opt-out: HOLYSHEEP_BRIDGE_NO_LIVE_MODELS=1 keeps the old static behavior.

// Process-wide cache for the live catalog: `at` is the epoch-ms timestamp of
// the last successful fetch, `ids` the fetched model-id array (null until the
// first successful fetch).
let _liveModelsCache = { at: 0, ids: null }
// How long a fetched catalog stays fresh before we re-query upstream.
const LIVE_MODELS_TTL_MS = 60_000
|
|
774
|
-
|
|
775
|
-
// Fetch the live model catalog from the HolySheep OpenAI-compatible API.
// Returns an array of model ids, or null whenever the live list is
// unavailable (env opt-out, missing API key, network failure, malformed or
// empty payload) so callers can fall back to the static config.models.
// Successful results are cached process-wide for LIVE_MODELS_TTL_MS.
async function fetchLiveHolySheepModels(config) {
  if (process.env.HOLYSHEEP_BRIDGE_NO_LIVE_MODELS === '1') return null

  const now = Date.now()
  const cacheIsFresh = _liveModelsCache.ids && now - _liveModelsCache.at < LIVE_MODELS_TTL_MS
  if (cacheIsFresh) return _liveModelsCache.ids

  const base = (config.baseUrlOpenAI || 'https://api.holysheep.ai/v1').replace(/\/+$/, '')
  const apiKey = config.apiKey
  if (!apiKey) return null

  try {
    const resp = await upstreamFetch(`${base}/models`, {
      method: 'GET',
      headers: { authorization: `Bearer ${apiKey}` },
    })
    if (!resp.ok) return null

    const body = await resp.json()
    if (!Array.isArray(body?.data)) return null

    // Keep only well-formed entries carrying a string `id`.
    const ids = []
    for (const entry of body.data) {
      if (entry && typeof entry.id === 'string') ids.push(entry.id)
    }

    if (ids.length) {
      _liveModelsCache = { at: now, ids }
      return ids
    }
  } catch {
    // Network error / upstream 5xx — fall back to static config.models.
  }
  return null
}
|
|
804
|
-
|
|
805
|
-
// Build the `/v1/models` payload, preferring the live upstream catalog.
// config.models keeps its ordering at the front; live-only ids are appended
// afterwards so users' default model never shifts position. Falls back to
// the static list when the live fetch yields nothing.
async function buildLiveModelsResponse(config) {
  const live = await fetchLiveHolySheepModels(config)
  const configured = config.models || []
  if (!live) return buildModelsResponse(config)

  // Dedupe while preserving configured-first ordering.
  const seen = new Set(configured)
  const merged = [...configured]
  for (const id of live) {
    if (seen.has(id)) continue
    seen.add(id)
    merged.push(id)
  }

  return {
    object: 'list',
    data: merged.map((id) => ({ id, object: 'model', owned_by: 'holysheep' })),
  }
}
|
|
828
|
-
|
|
829
|
-
// Probe whether a process with the given pid exists.
// Returns true when it is alive (including alive-but-unsignalable, reported
// as EPERM), false when it has exited, and null when `pid` is not a usable
// positive integer — callers treat null as "unknown, take no action".
function isProcessAlive(pid) {
  const usable = Number.isInteger(pid) && pid > 0
  if (!usable) return null
  try {
    // Signal 0 performs the existence/permission check without delivering
    // any actual signal.
    process.kill(pid, 0)
    return true
  } catch (err) {
    // EPERM means the process exists but belongs to another user.
    return Boolean(err && err.code === 'EPERM')
  }
}
|
|
839
|
-
|
|
840
|
-
// Decide whether the OpenClaw Gateway that owns this bridge is still healthy.
// Resolution order:
//   1. No valid gatewayPort configured  -> ok (nothing to watch).
//   2. gatewayPid definitely exited     -> not ok.
//   3. HTTP GET / against the gateway   -> ok iff it answers within the timeout.
async function checkGatewayHealth(config) {
  const gatewayPort = Number(config.gatewayPort)
  const portConfigured = Number.isInteger(gatewayPort) && gatewayPort > 0
  if (!portConfigured) return { ok: true, reason: 'no_gateway_port' }

  // Tri-state pid probe: only a definite `false` (process gone) fails here;
  // null (unusable pid) falls through to the HTTP check.
  if (isProcessAlive(Number(config.gatewayPid)) === false) {
    return { ok: false, reason: 'gateway_pid_exited' }
  }

  const host = config.gatewayHost || '127.0.0.1'
  const timeout = Number(config.watchdog?.requestTimeoutMs) || DEFAULT_WATCHDOG_REQUEST_TIMEOUT_MS

  // Use http.get with family: 4 to force IPv4. On Windows, Node.js may try
  // IPv6 (::1) first while the Gateway listens only on 127.0.0.1, which made
  // the watchdog wrongly conclude the gateway was dead and kill the bridge.
  try {
    const http = require('http')
    await new Promise((resolve, reject) => {
      const probe = http.get({ hostname: host, port: gatewayPort, path: '/', family: 4 }, resolve)
      probe.setTimeout(timeout, () => {
        probe.destroy()
        reject(new Error('timeout'))
      })
      probe.on('error', reject)
    })
    return { ok: true, reason: 'gateway_http_ok' }
  } catch {
    return { ok: false, reason: 'gateway_http_unreachable' }
  }
}
|
|
869
|
-
|
|
870
|
-
// Announce shutdown and close the bridge's HTTP server, then exit.
// The unref'd 250ms fallback timer guarantees the process exits even if open
// connections prevent server.close() from ever invoking its callback.
function stopBridge(server, reason) {
  process.stdout.write(`HolySheep OpenClaw bridge stopping: ${reason}\n`)
  server.close(() => process.exit(0))
  const failsafe = setTimeout(() => process.exit(0), 250)
  failsafe.unref()
}
|
|
875
|
-
|
|
876
|
-
// Periodically verify that the parent OpenClaw Gateway is still reachable and
// shut this bridge down when it is not, so orphaned bridge processes don't
// linger after the gateway dies. The config file is re-read on every tick,
// which lets users toggle `watchdog.enabled` or tune thresholds without
// restarting the bridge.
function startGatewayWatchdog(server, configPath = BRIDGE_CONFIG_FILE) {
  const bridgeStartedAt = Date.now()
  let consecutiveFailures = 0
  let stopping = false

  const timer = setInterval(async () => {
    if (stopping) return

    let config
    try {
      config = readBridgeConfig(configPath)
    } catch {
      // A missing/unreadable config means the installation was torn down —
      // stop immediately rather than keep polling.
      stopping = true
      stopBridge(server, 'bridge config missing')
      return
    }

    const watchdog = config.watchdog || {}
    if (watchdog.enabled === false) return

    const startupGraceMs = Number(watchdog.startupGraceMs) || DEFAULT_WATCHDOG_STARTUP_GRACE_MS
    const failureThreshold = Number(watchdog.failureThreshold) || DEFAULT_WATCHDOG_FAILURE_THRESHOLD
    const health = await checkGatewayHealth(config)

    if (health.ok) {
      // Any healthy check resets the failure streak.
      consecutiveFailures = 0
      return
    }

    // Within the startup grace window an unhealthy gateway is tolerated (it
    // may still be booting); note the failure counter is NOT incremented here.
    const gatewayStartedAt = Date.parse(config.gatewayStartedAt || '') || bridgeStartedAt
    if (Date.now() - gatewayStartedAt < startupGraceMs) {
      return
    }

    // Require several consecutive failed checks before acting, so a single
    // transient timeout does not take the bridge down.
    consecutiveFailures += 1
    if (consecutiveFailures < failureThreshold) return

    stopping = true
    stopBridge(server, `OpenClaw Gateway unavailable (${health.reason})`)
  }, DEFAULT_WATCHDOG_INTERVAL_MS)

  // Don't let the watchdog timer keep the process alive by itself; stop
  // polling once the server closes.
  timer.unref()
  server.on('close', () => clearInterval(timer))
}
|
|
920
|
-
|
|
921
|
-
// Create the bridge's HTTP server: a small OpenAI-compatible facade that
// OpenClaw talks to. Routes:
//   OPTIONS *                  -> CORS preflight
//   GET  /health               -> liveness + configured models
//   GET  /v1/models            -> live HolySheep catalog merged with config.models
//   POST /v1/chat/completions  -> relayed upstream (OpenAI or Anthropic route)
// The config file is re-read per request so key/model changes apply live.
// Any handler error is reported as a 500 with an OpenAI-style error envelope.
function createBridgeServer(configPath = BRIDGE_CONFIG_FILE) {
  return http.createServer(async (req, res) => {
    if (req.method === 'OPTIONS') {
      res.writeHead(204, {
        'access-control-allow-origin': '*',
        'access-control-allow-methods': 'GET,POST,OPTIONS',
        'access-control-allow-headers': 'content-type,authorization,x-api-key,anthropic-version',
      })
      return res.end()
    }

    try {
      const config = readBridgeConfig(configPath)
      const url = new URL(req.url, `http://${req.headers.host || '127.0.0.1'}`)

      if (req.method === 'GET' && url.pathname === '/health') {
        return sendJson(res, 200, { ok: true, port: config.port, models: config.models || [] })
      }

      if (req.method === 'GET' && url.pathname === '/v1/models') {
        // Fix: previously served the static buildModelsResponse(config), so the
        // live-catalog merge (buildLiveModelsResponse) introduced for this very
        // route was never reached and OpenClaw's dropdown showed a stale list.
        // buildLiveModelsResponse falls back to the static list on any upstream
        // failure, so this is strictly backward-compatible.
        return sendJson(res, 200, await buildLiveModelsResponse(config))
      }

      if (req.method === 'POST' && url.pathname === '/v1/chat/completions') {
        const requestBody = await readJsonBody(req)
        const route = pickRoute(requestBody.model)
        if (route === 'openai') return relayOpenAIRequest(requestBody, config, res)
        return relayAnthropicRequest(requestBody, config, route, res)
      }

      return sendJson(res, 404, { error: { message: 'Not found' } })
    } catch (error) {
      return sendJson(res, 500, { error: { message: error.message || 'Bridge error' } })
    }
  })
}
|
|
957
|
-
|
|
958
|
-
// Boot the bridge: resolve host/port from CLI args (falling back to the
// bridge config file and localhost), start the HTTP server, and attach the
// gateway watchdog. Returns the server so callers can close it.
function startBridge(args = parseArgs(process.argv.slice(2))) {
  const config = readBridgeConfig(args.config)
  const host = args.host || '127.0.0.1'
  const port = args.port || config.port

  const server = createBridgeServer(args.config)
  server.listen(port, host, () => {
    process.stdout.write(`HolySheep OpenClaw bridge listening on http://${host}:${port}\n`)
  })
  startGatewayWatchdog(server, args.config)
  return server
}
|
|
971
|
-
|
|
972
|
-
// Start the bridge when this file is executed directly
// (`node openclaw-bridge.js`); when require()d, only the exports are exposed.
if (require.main === module) {
  startBridge()
}
|
|
975
|
-
|
|
976
|
-
// Public surface of the bridge module: config constants/readers, payload
// translators, and the server entry points.
module.exports = {
  BRIDGE_CONFIG_FILE,
  buildAnthropicPayload,
  anthropicToOpenAIResponse,
  buildModelsResponse,
  createBridgeServer,
  parseArgs,
  parseOpenAIStreamText,
  pickRoute,
  readBridgeConfig,
  startBridge,
}
|